/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "user_exp_rcv.h"
#include "trace.h"
#include "mmu_rb.h"

struct tid_group {
	struct list_head list;
	unsigned base;
	u8 size;
	u8 used;
	u8 map;
};

struct tid_rb_node {
	struct mmu_rb_node mmu;
	unsigned long phys;
	struct tid_group *grp;
	u32 rcventry;
	dma_addr_t dma_addr;
	bool freed;
	unsigned npages;
	struct page *pages[0];
};

struct tid_pageset {
	u16 idx;
	u16 count;
};

#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))

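/*
 * Illustrative example (assuming 4 KiB pages): a buffer with
 * vaddr = 0x1ff0 and len = 0x20 straddles a page boundary, so
 * num_user_pages() reports 2 pages even though len < PAGE_SIZE.
 */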
#define num_user_pages(vaddr, len)					\
	(1 + (((((unsigned long)(vaddr) +				\
		 (unsigned long)(len) - 1) & PAGE_MASK) -		\
	       ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))

static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
			    struct hfi1_filedata *);
static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
			      struct tid_group *, struct page **, unsigned);
static int tid_rb_insert(void *, struct mmu_rb_node *);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *, struct mmu_rb_node *);
static int tid_rb_invalidate(void *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
			    struct tid_pageset *, unsigned, u16, struct page **,
			    u32 *, unsigned *, unsigned *);
static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};

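/*
 * Note on the encoding below: RcvArray entries are handled in pairs, so
 * IDX identifies the pair (rcventry / 2) and CTRL selects the entry
 * within it (1 for the even entry, 2 for the odd one). For example,
 * rcventry 5 encodes as IDX = 2, CTRL = 2.
 */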
static inline u32 rcventry2tidinfo(u32 rcventry)
{
	u32 pair = rcventry & ~0x1;

	return EXP_TID_SET(IDX, pair >> 1) |
		EXP_TID_SET(CTRL, 1 << (rcventry - pair));
}

static inline void exp_tid_group_init(struct exp_tid_set *set)
{
	INIT_LIST_HEAD(&set->list);
	set->count = 0;
}

static inline void tid_group_remove(struct tid_group *grp,
				    struct exp_tid_set *set)
{
	list_del_init(&grp->list);
	set->count--;
}

static inline void tid_group_add_tail(struct tid_group *grp,
				      struct exp_tid_set *set)
{
	list_add_tail(&grp->list, &set->list);
	set->count++;
}

static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
{
	struct tid_group *grp =
		list_first_entry(&set->list, struct tid_group, list);
	list_del_init(&grp->list);
	set->count--;
	return grp;
}

static inline void tid_group_move(struct tid_group *group,
				  struct exp_tid_set *s1,
				  struct exp_tid_set *s2)
{
	tid_group_remove(group, s1);
	tid_group_add_tail(group, s2);
}

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned tidbase;
	int i, ret = 0;

	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);

	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		exp_tid_group_init(&uctxt->tid_group_list);
		exp_tid_group_init(&uctxt->tid_used_list);
		exp_tid_group_init(&uctxt->tid_full_list);

		tidbase = uctxt->expected_base;
		for (i = 0; i < uctxt->expected_count /
			     dd->rcv_entries.group_size; i++) {
			struct tid_group *grp;

			grp = kzalloc(sizeof(*grp), GFP_KERNEL);
			if (!grp) {
				/*
				 * If we fail here, the groups already
				 * allocated will be freed by the close
				 * call.
				 */
				ret = -ENOMEM;
				goto done;
			}
			grp->size = dd->rcv_entries.group_size;
			grp->base = tidbase;
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			tidbase += dd->rcv_entries.group_size;
		}
	}

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kzalloc(uctxt->expected_count *
					   sizeof(u32), GFP_KERNEL);
		if (!fd->invalid_tids) {
			ret = -ENOMEM;
			goto done;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
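	/*
	 * For example (illustrative): with expected_count = 64 shared by
	 * 3 subcontexts, subctxt 0 ends up with a tid_limit of 22 while
	 * subctxts 1 and 2 get 21 entries each.
	 */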
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);
done:
	return ret;
}

int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_group *grp, *gptr;

	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return 0;
	/*
	 * The notifier would have been removed when the process's mm
	 * was freed.
	 */
	if (fd->handler)
		hfi1_mmu_rb_unregister(fd->handler);

	kfree(fd->invalid_tids);

	if (!uctxt->cnt) {
		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
		list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
					 list) {
			list_del_init(&grp->list);
			kfree(grp);
		}
		hfi1_clear_tids(uctxt);
	}

	kfree(fd->entry_to_rb);
	return 0;
}

/*
 * Write an "empty" RcvArray entry.
 * This function exists so the TID registration code can use it
 * to write to unused/unneeded entries and still take advantage
 * of the WC performance improvements. The HFI will ignore this
 * write to the RcvArray entry.
 */
static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
{
	/*
	 * Doing the WC fill writes only makes sense if the device is
	 * present and the RcvArray has been mapped as WC memory.
	 */
	if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc)
		writeq(0, dd->rcvarray_wc + (index * 8));
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
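/*
 * A short worked example of the above (illustrative, assuming a group
 * size of 8): a request that resolves to 26 pagesets programs 3
 * complete groups taken from tid_group_list and then places the
 * remaining 2 pagesets into a partially used group, pulling one from
 * tid_group_list if tid_used_list is empty.
 */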
int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
		tididx = 0, mapped, mapped_pages = 0;
	unsigned long vaddr = tinfo->vaddr;
	struct page **pages = NULL;
	u32 *tidlist = NULL;
	struct tid_pageset *pagesets = NULL;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tinfo->length);
	if (!npages)
		return -EINVAL;

	if (npages > uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}

	pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
			   GFP_KERNEL);
	if (!pagesets)
		return -ENOMEM;

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto bail;
	}

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		ret = -ENOMEM;
		goto bail;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		ret = pinned;
		goto bail;
	}
	fd->tid_n_pinned += npages;

	/* Find sets of physically contiguous pages */
	npagesets = find_phys_blocks(pages, pinned, pagesets);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + npagesets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = npagesets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_lock);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fp, vaddr, grp, pagesets,
				       pageidx, dd->rcv_entries.group_size,
				       pages, tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fp, vaddr, grp, pagesets,
					       pageidx, use, pages, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				ret = -EFAULT;
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_lock);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fp, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned) {
		hfi1_release_user_pages(fd->mm, &pages[mapped_pages],
					pinned - mapped_pages,
					false);
		fd->tid_n_pinned -= pinned - mapped_pages;
	}
bail:
	kfree(pagesets);
	kfree(pages);
	kfree(tidlist);
	return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	tidinfo = memdup_user((void __user *)(unsigned long)tinfo->tidlist,
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_lock);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_lock);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
	u32 *array;
	int ret = 0;

	if (!fd->invalid_tids)
		return -EINVAL;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct page **pages, unsigned npages,
			    struct tid_pageset *list)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down into
			 * sizes supported by the HW.
			 * There are two main constraints:
			 * 1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *    If the total set size is bigger than that
			 *    program only a MAX_EXPECTED_BUFFER chunk.
			 * 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
			 *    2 and program that size.
			 */
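			/*
			 * For example (illustrative, assuming 4 KiB pages and
			 * a run well below MAX_EXPECTED_BUFFER): a run of 7
			 * contiguous pages is emitted as pagesets of 4, 2 and
			 * 1 pages.
			 */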
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fp: file pointer
 * @vaddr: starting user virtual address
 * @grp: RcvArray group
 * @sets: array of struct tid_pageset holding information on physically
 *        contiguous chunks from the user buffer
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @pages: an array of struct page * for the user buffer
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct file *fp, unsigned long vaddr,
			    struct tid_group *grp,
			    struct tid_pageset *sets,
			    unsigned start, u16 count, struct page **pages,
			    u32 *tidlist, unsigned *tididx, unsigned *pmapped)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = sets[setidx].count;
		pageidx = sets[setidx].idx;

		ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE),
					 rcventry, grp, pages + pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
			      u32 rcventry, struct tid_group *grp,
			      struct page **pages, unsigned npages)
{
	int ret;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = vaddr;
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}

static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0);
	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	flush_wc();

	pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
			 PCI_DMA_FROMDEVICE);
	hfi1_release_user_pages(fd->mm, node->pages, node->npages, true);
	fd->tid_n_pinned -= node->npages;

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

/*
 * Always return 0 from this function. A non-zero return indicates that the
 * remove operation will be called and that memory should be unpinned.
 * However, the driver cannot unpin out from under PSM. Instead, retain the
 * memory (by returning 0) and inform PSM that the memory is going away. PSM
 * will call back later when it has removed the memory from its list.
 */
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	if (node->freed)
		return 0;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}

static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}