/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
				u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static const struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))

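/*
 * Token layout, as implied by the masks and shifts above (a worked
 * example, not new mechanism): bits [63:32] hold the magic value
 * 0xdabbad00, [27:24] the mmap type, [23:16] the context, [15:12] the
 * sub-context, and [11:0] the offset within the page.  For instance,
 * HFI1_MMAP_TOKEN(RCV_HDRQ, 3, 1, addr) encodes
 * (0xdabbad00ULL << 32) | (4 << 24) | (3 << 16) | (1 << 12) |
 * offset_in_page(addr), since RCV_HDRQ is the fourth enumerator.
 */
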
#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* Just take a ref now. Not all opens result in a context assign */
	kobject_get(&dd->kobj);

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (fd) {
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		mmgrab(fd->mm);
		fd->dd = dd;
		fp->private_data = fd;
	} else {
		fp->private_data = NULL;

		if (atomic_dec_and_test(&dd->user_refcount))
			complete(&dd->user_comp);

		return -ENOMEM;
	}

	return 0;
}

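/*
 * Usage sketch (illustrative, not part of this file): a user process
 * opens the per-unit device node and then assigns a context via ioctl
 * before any mmap or read/write traffic.  The names come from the hfi1
 * uapi header; the device path shown is an assumption for unit 0.
 *
 *	int fd = open("/dev/hfi1_0", O_RDWR);
 *	struct hfi1_user_info uinfo = {
 *		.userversion = HFI1_USER_SWVERSION,
 *	};
 *	if (fd >= 0)
 *		ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, &uinfo);
 */
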
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;
	int uval = 0;
	unsigned long ul_uval = 0;
	u16 uval16 = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt)
			sc_return_credits(uctxt->sc);
		break;

	case HFI1_IOCTL_TID_UPDATE:
		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_FREE:
		ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_INVAL_READ:
		ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_RECV_CTRL:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = manage_rcvq(uctxt, fd->subctxt, uval);
		break;

	case HFI1_IOCTL_POLL_TYPE:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;

	case HFI1_IOCTL_ACK_EVENT:
		ret = get_user(ul_uval, (unsigned long __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
		break;

	case HFI1_IOCTL_SET_PKEY:
		ret = get_user(uval16, (u16 __user *)arg);
		if (ret != 0)
			return -EFAULT;
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
		else
			return -EPERM;
		break;

	case HFI1_IOCTL_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc)
			return -EINVAL;

		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED))
			return -ENOLCK;

		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

			if (dd->flags & HFI1_FORCED_FREEZE)
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				return -ENODEV;

			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}

	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

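/*
 * Usage sketch (illustrative): the scalar commands above read their
 * argument with get_user(), so the caller passes a pointer.  For
 * example, to restart the receive queue:
 *
 *	int start = 1;
 *	ioctl(fd, HFI1_IOCTL_RECV_CTRL, &start);
 */
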
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq)
		return -EIO;

	if (!iter_is_iovec(from) || !dim)
		return -EINVAL;

	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
		return -ENOSPC;

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	return reqs;
}

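/*
 * Note on the write_iter semantics above: each loop iteration consumes
 * one SDMA request worth of iovecs (hfi1_user_sdma_process_request()
 * reports how many segments it used via @count), and the return value
 * is the number of requests queued, or the first error.  User level
 * therefore submits with writev(), one request per group of iovecs.
 */
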
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = uctxt->rcvhdrq_size;
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)
			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
			ret = -EPERM;
			goto done;
		}
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}

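/*
 * Mapping-path summary (restating the logic above): the decoded token
 * selects one of three mechanisms -- vmf=1 defers to vma_fault() below
 * for vmalloc'ed buffers, mapio=1 uses io_remap_pfn_range() for chip
 * MMIO (PIO buffers, user registers), and everything else is kernel
 * memory mapped up front with remap_pfn_range().
 */
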
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata, uctxt);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It is not ready to be
	 * removed until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
	*ev = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt);
	/*
	 * If a send context is allocated, reset context integrity
	 * checks to default and disable the send context.
	 */
	if (uctxt->sc) {
		set_pio_integrity(uctxt->sc);
		sc_disable(uctxt->sc);
	}

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;

	deallocate_ctxt(uctxt);
done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

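/*
 * Note: kvirt_to_phys() relies on vmalloc_to_page(), so it is only
 * meaningful for vmalloc'ed memory (the STATUS mmap case above passes
 * dd->status through it); it returns 0 when no page is mapped at the
 * given address.
 */
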
/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
	 * has been completed.
	 */
	ret = wait_event_interruptible(
		fd->uctxt->wait,
		!test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
	}

	if (ret) {
		/*
		 * Release the sub-context slot while fd->uctxt is still
		 * valid, then drop the reference and clear the pointer.
		 */
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
	}

	return ret;
}

static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	int ret;
	unsigned int swmajor, swminor;
	struct hfi1_ctxtdata *uctxt = NULL;
	struct hfi1_user_info uinfo;

	if (fd->uctxt)
		return -EINVAL;

	if (sizeof(uinfo) != len)
		return -EINVAL;

	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
		return -EFAULT;

	swmajor = uinfo.userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	swminor = uinfo.userversion & 0xffff;

	/*
	 * Acquire the mutex to protect against multiple creations of what
	 * could be a shared base context.
	 */
	mutex_lock(&hfi1_mutex);
	/*
	 * Get a sub context if available (fd->uctxt will be set).
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = find_sub_ctxt(fd, &uinfo);

	/*
	 * Allocate a base context if context sharing is not required or a
	 * sub context wasn't found.
	 */
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (uctxt->subctxt_cnt) {
			/*
			 * Base context is done (successfully or not), notify
			 * anybody using a sub-context that is waiting for
			 * this completion.
			 */
			clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
			wake_up(&uctxt->wait);
		}
		break;
	case 1:
		ret = complete_subctxt(fd);
		break;
	default:
		break;
	}

	return ret;
}

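/*
 * Flow summary for assign_ctxt(): under hfi1_mutex it first tries to
 * join an existing shared base context (find_sub_ctxt() returns 1), and
 * only allocates a fresh base context when that yields 0.  The
 * HFI1_CTXT_BASE_UNINIT/HFI1_CTXT_BASE_FAILED bits then form the
 * handshake that lets complete_subctxt() callers sleep until the base
 * initialization finishes.
 */
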
/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
		return 0;

	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* a non-zero value means we are done */
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	if (!dd->freectxts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "user ctxtdata allocation failed\n");
		return ret;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Setup sub context information if the user-level has requested
	 * sub contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper base context.
	 */
	if (uinfo->subctxt_cnt)
		init_subctxts(uctxt, uinfo);
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(uctxt);
	return ret;
}

static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
	mutex_lock(&hfi1_mutex);
	hfi1_stats.sps_ctxts--;
	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo)
{
	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;

	return ret;
}

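/*
 * Layout note: the three vmalloc_user() buffers above back the
 * SUBCTXT_UREGS, SUBCTXT_RCV_HDRQ and SUBCTXT_EGRBUF mmap types handled
 * in hfi1_file_mmap() -- one shared uregs page, plus per-sub-context
 * copies of the header queue and eager buffers, all faulted in lazily
 * through vma_fault().
 */
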
static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when it changes (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt.  Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}

static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	if (sizeof(cinfo) != len)
		return -EINVAL;

	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
				HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
			HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
			HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
	/* adjust flag if this fd is not able to cache */
	if (!fd->handler)
		cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user((void __user *)arg, &cinfo, len))
		return -EFAULT;

	return 0;
}

static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt)
{
	int ret;

	ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
	if (ret)
		return ret;

	ret = hfi1_user_exp_rcv_init(fd, uctxt);
	if (ret)
		hfi1_user_sdma_free_queues(fd, uctxt);

	return ret;
}

static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	hfi1_init_ctxt(uctxt->sc);

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		return ret;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto setup_failed;

	/* If sub-contexts are enabled, do the appropriate setup */
	if (uctxt->subctxt_cnt)
		ret = setup_subctxt(uctxt);
	if (ret)
		goto setup_failed;

	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
	if (ret)
		goto setup_failed;

	ret = init_user_ctxt(fd, uctxt);
	if (ret)
		goto setup_failed;

	user_init(uctxt);

	/* Now that the context is set up, the fd can get a reference. */
	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 0;

setup_failed:
	/* Set the failed bit so sub-context init can do the right thing */
	set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
	deallocate_ctxt(uctxt);

	return ret;
}

Michael J. Ruhl45afb322017-09-26 07:04:10 -07001305static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001306{
1307 struct hfi1_base_info binfo;
Ira Weiny9e10af42015-10-30 18:58:40 -04001308 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001309 struct hfi1_devdata *dd = uctxt->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001310	unsigned int offset;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001311
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001312 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001313
Michael J. Ruhl45afb322017-09-26 07:04:10 -07001314 if (sizeof(binfo) != len)
1315 return -EINVAL;
1316
Mike Marciniszyn77241052015-07-30 15:17:43 -04001317 memset(&binfo, 0, sizeof(binfo));
1318 binfo.hw_version = dd->revision;
1319 binfo.sw_version = HFI1_KERN_SWVERSION;
1320 binfo.bthqp = kdeth_qp;
1321 binfo.jkey = uctxt->jkey;
1322 /*
1323 * If more than 64 contexts are enabled the allocated credit
1324 * return will span two or three contiguous pages. Since we only
1325 * map the page containing the context's credit return address,
1326 * we need to calculate the offset in the proper page.
1327 */
1328 offset = ((u64)uctxt->sc->hw_free -
1329 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
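	/*
	 * Illustrative arithmetic only (assumes a 4 KiB PAGE_SIZE): if
	 * hw_free sits 0x1040 bytes past the credit return base, the
	 * credit return address lands in the second mapped page and the
	 * in-page offset is 0x1040 % 0x1000 = 0x40.
	 */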
1330 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001331 fd->subctxt, offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001332 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001333 fd->subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001334 uctxt->sc->base_addr);
1335 binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
1336 uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001337 fd->subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001338 uctxt->sc->base_addr);
1339 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001340 fd->subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001341 uctxt->rcvhdrq);
1342 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001343 fd->subctxt,
Tymoteusz Kielan60368182016-09-06 04:35:54 -07001344 uctxt->egrbufs.rcvtids[0].dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001345 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001346 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001347 /*
1348 * user regs are at
1349 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
1350 */
1351 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001352 fd->subctxt, 0);
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001353 offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
1354 sizeof(*dd->events));
Mike Marciniszyn77241052015-07-30 15:17:43 -04001355 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001356 fd->subctxt,
1357 offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001358 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001359 fd->subctxt,
1360 dd->status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001361 if (HFI1_CAP_IS_USET(DMA_RTAIL))
1362 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001363 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001364 if (uctxt->subctxt_cnt) {
1365 binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001366 uctxt->ctxt,
1367 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001368 binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001369 uctxt->ctxt,
1370 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001371 binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001372 uctxt->ctxt,
1373 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001374 }
Michael J. Ruhl45afb322017-09-26 07:04:10 -07001375
1376 if (copy_to_user((void __user *)arg, &binfo, len))
1377 return -EFAULT;
1378
1379 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001380}
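
/*
 * Userspace usage sketch (illustrative only; assumes the
 * HFI1_IOCTL_USER_INFO command and struct hfi1_base_info from the uapi
 * headers):
 *
 *	struct hfi1_base_info binfo;
 *
 *	if (ioctl(ctxt_fd, HFI1_IOCTL_USER_INFO, &binfo) < 0)
 *		err(1, "HFI1_IOCTL_USER_INFO");
 *
 * Each returned *_bufbase token is then handed to mmap() to map the PIO
 * buffers, RcvHdr queue, eager buffers, and so on.
 */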
1381
Michael J. Ruhlf404ca42017-09-26 07:04:16 -07001382/**
1383 * user_exp_rcv_setup - Set up the given tid rcv list
1384 * @fd: file data of the current driver instance
1385 * @arg: ioctl argument for user space information
1386 * @len: length of data structure associated with ioctl command
1387 *
1388 * Wrapper to validate ioctl information before doing _rcv_setup.
1389 *
1390 */
1391static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
1392 u32 len)
1393{
1394 int ret;
1395 unsigned long addr;
1396 struct hfi1_tid_info tinfo;
1397
1398 if (sizeof(tinfo) != len)
1399 return -EINVAL;
1400
1401	if (copy_from_user(&tinfo, (void __user *)arg, sizeof(tinfo)))
1402 return -EFAULT;
1403
1404 ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
1405 if (!ret) {
1406 /*
1407 * Copy the number of tidlist entries we used
1408 * and the length of the buffer we registered.
1409 */
1410 addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
1411 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
1412 sizeof(tinfo.tidcnt)))
1413 return -EFAULT;
1414
1415 addr = arg + offsetof(struct hfi1_tid_info, length);
1416 if (copy_to_user((void __user *)addr, &tinfo.length,
1417 sizeof(tinfo.length)))
1418 ret = -EFAULT;
1419 }
1420
1421 return ret;
1422}
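
/*
 * Userspace usage sketch for the TID wrappers (illustrative only; assumes
 * the HFI1_IOCTL_TID_UPDATE/HFI1_IOCTL_TID_FREE commands and struct
 * hfi1_tid_info from the uapi headers):
 *
 *	struct hfi1_tid_info tinfo = {
 *		.vaddr   = (__u64)(uintptr_t)buf,	// pinned receive buffer
 *		.length  = buf_len,
 *		.tidlist = (__u64)(uintptr_t)tids,	// out: TID entry array
 *	};
 *
 *	if (!ioctl(ctxt_fd, HFI1_IOCTL_TID_UPDATE, &tinfo))
 *		// tinfo.tidcnt and tinfo.length now reflect what was
 *		// actually programmed, per the copy-backs above
 */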
1423
Michael J. Ruhl3920eef2017-09-26 07:04:22 -07001424/**
1425 * user_exp_rcv_clear - Clear the given tid rcv list
1426 * @fd: file data of the current driver instance
1427 * @arg: ioctl argument for user space information
1428 * @len: length of data structure associated with ioctl command
1429 *
1430 * hfi1_user_exp_rcv_clear() can be called from the error path. Because
1431 * of this, we need to use this wrapper to copy the user space information
1432 * before doing the clear.
1433 */
1434static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
1435 u32 len)
1436{
1437 int ret;
1438 unsigned long addr;
1439 struct hfi1_tid_info tinfo;
1440
1441 if (sizeof(tinfo) != len)
1442 return -EINVAL;
1443
1444	if (copy_from_user(&tinfo, (void __user *)arg, sizeof(tinfo)))
1445 return -EFAULT;
1446
1447 ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
1448 if (!ret) {
1449 addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
1450 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
1451 sizeof(tinfo.tidcnt)))
1452 return -EFAULT;
1453 }
1454
1455 return ret;
1456}
1457
Michael J. Ruhl8a41da02017-09-26 07:04:29 -07001458/**
1459 * user_exp_rcv_invalid - Invalidate the given tid rcv list
1460 * @fd: file data of the current driver instance
1461 * @arg: ioctl argument for user space information
1462 * @len: length of data structure associated with ioctl command
1463 *
1464 * Wrapper to validate ioctl information before doing _rcv_invalid.
1465 *
1466 */
1467static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
1468 u32 len)
1469{
1470 int ret;
1471 unsigned long addr;
1472 struct hfi1_tid_info tinfo;
1473
1474 if (sizeof(tinfo) != len)
1475 return -EINVAL;
1476
1477 if (!fd->invalid_tids)
1478 return -EINVAL;
1479
1480	if (copy_from_user(&tinfo, (void __user *)arg, sizeof(tinfo)))
1481 return -EFAULT;
1482
1483 ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
1484 if (ret)
1485 return ret;
1486
1487 addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
1488 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
1489 sizeof(tinfo.tidcnt)))
1490 ret = -EFAULT;
1491
1492 return ret;
1493}
1494
Mike Marciniszyn77241052015-07-30 15:17:43 -04001495static unsigned int poll_urgent(struct file *fp,
1496 struct poll_table_struct *pt)
1497{
Ira Weiny9e10af42015-10-30 18:58:40 -04001498 struct hfi1_filedata *fd = fp->private_data;
1499 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001500 struct hfi1_devdata *dd = uctxt->dd;
1501	unsigned int pollflag;
1502
1503 poll_wait(fp, &uctxt->wait, pt);
1504
1505 spin_lock_irq(&dd->uctxt_lock);
1506 if (uctxt->urgent != uctxt->urgent_poll) {
1507 pollflag = POLLIN | POLLRDNORM;
1508 uctxt->urgent_poll = uctxt->urgent;
1509 } else {
1510 pollflag = 0;
1511 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1512 }
1513 spin_unlock_irq(&dd->uctxt_lock);
1514
1515 return pollflag;
1516}
1517
1518static unsigned int poll_next(struct file *fp,
1519 struct poll_table_struct *pt)
1520{
Ira Weiny9e10af42015-10-30 18:58:40 -04001521 struct hfi1_filedata *fd = fp->private_data;
1522 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001523 struct hfi1_devdata *dd = uctxt->dd;
1524	unsigned int pollflag;
1525
1526 poll_wait(fp, &uctxt->wait, pt);
1527
1528 spin_lock_irq(&dd->uctxt_lock);
1529 if (hdrqempty(uctxt)) {
1530 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
Michael J. Ruhl22505632017-07-24 07:46:06 -07001531 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001532 pollflag = 0;
Jubin Johne4909742016-02-14 20:22:00 -08001533 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04001534 pollflag = POLLIN | POLLRDNORM;
Jubin Johne4909742016-02-14 20:22:00 -08001535 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04001536 spin_unlock_irq(&dd->uctxt_lock);
1537
1538 return pollflag;
1539}
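
/*
 * Usage note (illustrative): which of the two handlers above services a
 * given fd is chosen by the poll-type ioctl (HFI1_IOCTL_POLL_TYPE in the
 * uapi headers; the assumption here is that HFI1_POLL_TYPE_URGENT selects
 * poll_urgent() and HFI1_POLL_TYPE_ANYRCV selects poll_next()), after
 * which the process blocks in poll(2)/select(2) as usual.
 */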
1540
1541/*
1542 * Find all user contexts in use, and set the specified bit in their
1543 * event mask.
1544 * See also find_ctxt() for a similar use that is specific to send buffers.
1545 */
1546int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
1547{
1548 struct hfi1_ctxtdata *uctxt;
1549 struct hfi1_devdata *dd = ppd->dd;
Michael J. Ruhle6f76222017-07-24 07:45:55 -07001550 u16 ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001551
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001552 if (!dd->events)
1553 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001554
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07001555 for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001556 ctxt++) {
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001557 uctxt = hfi1_rcd_get_by_index(dd, ctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001558 if (uctxt) {
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001559 unsigned long *evs;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001560 int i;
1561 /*
1562			 * subctxt_cnt is 0 if the context is not shared, so set
1563			 * the base context's bit first, then any sub-context bits
1564 */
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001565 evs = dd->events + uctxt_offset(uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001566 set_bit(evtbit, evs);
1567 for (i = 1; i < uctxt->subctxt_cnt; i++)
1568 set_bit(evtbit, evs + i);
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001569 hfi1_rcd_put(uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001570 }
1571 }
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001572
1573 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001574}
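
/*
 * Caller sketch (illustrative): port-level error paths can fan an event
 * out to every user context with, e.g. (assuming the uapi event bits),
 *
 *	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LINKDOWN_BIT);
 *
 * Each process sees the bit in its mmapped events page and acknowledges
 * it via the ACK_EVENT ioctl (see user_event_ack() below).
 */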
1575
1576/**
1577 * manage_rcvq - manage a context's receive queue
1578 * @uctxt: the context
1579 * @subctxt: the sub-context
1580 * @start_stop: action to carry out
1581 *
1582 * start_stop == 0 disables receive on the context, for use in queue
1583 * overflow conditions. start_stop == 1 re-enables receive, to be used
1584 * to re-initialize the software copy of the head register
1585 */
Michael J. Ruhl8737ce92017-05-04 05:15:15 -07001586static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001587 int start_stop)
1588{
1589 struct hfi1_devdata *dd = uctxt->dd;
1590 unsigned int rcvctrl_op;
1591
1592 if (subctxt)
1593 goto bail;
1594	/* atomically enable or disable receive on this context */
1595 if (start_stop) {
1596 /*
1597 * On enable, force in-memory copy of the tail register to
1598 * 0, so that protocol code doesn't have to worry about
1599 * whether or not the chip has yet updated the in-memory
1600 * copy or not on return from the system call. The chip
1601		 * always resets its tail register back to 0 on a
1602 * transition from disabled to enabled.
1603 */
1604 if (uctxt->rcvhdrtail_kvaddr)
1605 clear_rcvhdrtail(uctxt);
1606 rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
Jubin Johne4909742016-02-14 20:22:00 -08001607 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04001608 rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
Jubin Johne4909742016-02-14 20:22:00 -08001609 }
Michael J. Ruhl22505632017-07-24 07:46:06 -07001610 hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001611 /* always; new head should be equal to new tail; see above */
1612bail:
1613 return 0;
1614}
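
/*
 * Illustrative trigger (assumes the HFI1_IOCTL_RECV_CTRL command from the
 * uapi headers, which takes a pointer to an int flag): after draining an
 * overflowed queue, userspace re-enables receive with
 *
 *	int enable = 1;
 *
 *	ioctl(ctxt_fd, HFI1_IOCTL_RECV_CTRL, &enable);
 *
 * which reaches manage_rcvq() with start_stop == 1 and zeroes the software
 * tail copy before the context is re-enabled.
 */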
1615
1616/*
1617 * Clear the event notifier events for this context.
1618 * The user process then performs whatever actions are appropriate to
1619 * the bits having been set, if desired, and checks again in the future.
1620 */
Michael J. Ruhl8737ce92017-05-04 05:15:15 -07001621static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001622 unsigned long events)
1623{
1624 int i;
1625 struct hfi1_devdata *dd = uctxt->dd;
1626 unsigned long *evs;
1627
1628 if (!dd->events)
1629 return 0;
1630
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001631 evs = dd->events + uctxt_offset(uctxt) + subctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001632
1633 for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
1634 if (!test_bit(i, &events))
1635 continue;
1636 clear_bit(i, evs);
1637 }
1638 return 0;
1639}
1640
Michael J. Ruhl8737ce92017-05-04 05:15:15 -07001641static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001642{
1643 int ret = -ENOENT, i, intable = 0;
1644 struct hfi1_pportdata *ppd = uctxt->ppd;
1645 struct hfi1_devdata *dd = uctxt->dd;
1646
1647 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
1648 ret = -EINVAL;
1649 goto done;
1650 }
1651
1652 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
1653 if (pkey == ppd->pkeys[i]) {
1654 intable = 1;
1655 break;
1656 }
1657
1658 if (intable)
Michael J. Ruhl17573972017-07-24 07:46:01 -07001659 ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001660done:
1661 return ret;
1662}
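
/*
 * Illustrative trigger (assumes the HFI1_IOCTL_SET_PKEY command from the
 * uapi headers, which takes a pointer to a 16-bit pkey):
 *
 *	__u16 pkey = 0x8001;	// hypothetical partition key
 *
 *	ioctl(ctxt_fd, HFI1_IOCTL_SET_PKEY, &pkey);
 *
 * Requests for a pkey not present in ppd->pkeys[], or for either
 * management pkey, fail as coded above.
 */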
1663
Mike Marciniszyn77241052015-07-30 15:17:43 -04001664static void user_remove(struct hfi1_devdata *dd)
1665{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001666
1667 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001668}
1669
1670static int user_add(struct hfi1_devdata *dd)
1671{
1672 char name[10];
1673 int ret;
1674
Mike Marciniszyn77241052015-07-30 15:17:43 -04001675 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
Dennis Dalessandro0eb62652016-05-19 05:25:50 -07001676 ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
Ira Weinye116a642015-09-17 13:47:49 -04001677 &dd->user_cdev, &dd->user_device,
Dennis Dalessandroe11ffbd2016-05-19 05:26:44 -07001678 true, &dd->kobj);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001679 if (ret)
Dennis Dalessandro7312f292016-05-19 05:25:57 -07001680 user_remove(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001681
Mike Marciniszyn77241052015-07-30 15:17:43 -04001682 return ret;
1683}
1684
1685/*
1686 * Create per-unit files in /dev
1687 */
1688int hfi1_device_create(struct hfi1_devdata *dd)
1689{
Dennis Dalessandro0f7b1f92016-05-19 05:26:10 -07001690 return user_add(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001691}
1692
1693/*
1694 * Remove per-unit files in /dev
1695 * void, core kernel returns no errors for this stuff
1696 */
1697void hfi1_device_remove(struct hfi1_devdata *dd)
1698{
1699 user_remove(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001700}