/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static const struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
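
/*
 * Worked example (for illustration only; the values are hypothetical):
 * a RCV_HDRQ (type 4) token for context 3, sub-context 1, and an
 * address whose in-page offset is 0x40 packs as
 *
 *	HFI1_MMAP_TOKEN(RCV_HDRQ, 3, 1, addr)
 *		= (0xdabbad00ULL << 32) | (4 << 24) | (3 << 16) |
 *		  (1 << 12) | 0x40
 *		= 0xdabbad0004031040
 *
 * User space passes the page-aligned part of such a token as the mmap()
 * offset and uses the OFFSET field to index within the mapped page;
 * hfi1_file_mmap() below unpacks the fields with HFI1_MMAP_TOKEN_GET().
 */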

#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* Just take a ref now. Not all opens result in a context assign */
	kobject_get(&dd->kobj);

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (fd) {
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		mmgrab(fd->mm);
		fd->dd = dd;
		fp->private_data = fd;
	} else {
		fp->private_data = NULL;

		if (atomic_dec_and_test(&dd->user_refcount))
			complete(&dd->user_comp);

		return -ENOMEM;
	}

	return 0;
}

static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_tid_info tinfo;
	int ret = 0;
	unsigned long addr;
	int uval = 0;
	unsigned long ul_uval = 0;
	u16 uval16 = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt)
			sc_return_credits(uctxt->sc);
		break;

	case HFI1_IOCTL_TID_UPDATE:
		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_FREE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;

	case HFI1_IOCTL_TID_INVAL_READ:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;

	case HFI1_IOCTL_RECV_CTRL:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = manage_rcvq(uctxt, fd->subctxt, uval);
		break;

	case HFI1_IOCTL_POLL_TYPE:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;

	case HFI1_IOCTL_ACK_EVENT:
		ret = get_user(ul_uval, (unsigned long __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
		break;

	case HFI1_IOCTL_SET_PKEY:
		ret = get_user(uval16, (u16 __user *)arg);
		if (ret != 0)
			return -EFAULT;
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
		else
			return -EPERM;
		break;

	case HFI1_IOCTL_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc)
			return -EINVAL;

		/*
		 * There is no protection here. User level has to guarantee
		 * that no one will be writing to the send context while it
		 * is being re-initialized.  If user level breaks that
		 * guarantee, it will break its own context and no one
		 * else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the context
		 * as halted or frozen. Report error if we time out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED))
			return -ENOLCK;

		/*
		 * If the send context was halted due to a Freeze, wait until
		 * the device has been "unfrozen" before resetting the
		 * context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

			if (dd->flags & HFI1_FORCED_FREEZE)
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				return -ENODEV;

			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}

	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

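/*
 * For reference only -- a minimal user-space sketch, not part of this
 * driver (the device path and error handling are illustrative
 * assumptions): a process opens the device, binds a context with
 * HFI1_IOCTL_ASSIGN_CTXT, then queries it.
 *
 *	struct hfi1_user_info uinfo = {
 *		.userversion = HFI1_USER_SWVERSION,
 *		.subctxt_cnt = 0,		// no context sharing
 *	};
 *	struct hfi1_ctxt_info cinfo;
 *	int fd = open("/dev/hfi1_0", O_RDWR);
 *
 *	if (ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, &uinfo) < 0)
 *		err(1, "assign_ctxt");
 *	if (ioctl(fd, HFI1_IOCTL_CTXT_INFO, &cinfo) < 0)
 *		err(1, "ctxt_info");
 *
 * Ordering matters: every command other than ASSIGN_CTXT and GET_VERS
 * fails with -EINVAL until a context has been assigned (see the !uctxt
 * check above).
 */
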
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq)
		return -EIO;

	if (!iter_is_iovec(from) || !dim)
		return -EINVAL;

	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
		return -ENOSPC;

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	return reqs;
}

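/*
 * Illustration only -- assumed user-space usage, not part of this
 * driver (req_hdr and payload are hypothetical): SDMA requests are
 * submitted as a vectored write on the same fd, and the return value
 * is the number of requests queued, not a byte count.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = &req_hdr, .iov_len = sizeof(req_hdr) },
 *		{ .iov_base = payload,  .iov_len = payload_len },
 *	};
 *	ssize_t reqs = writev(fd, iov, 2);
 *
 * Completions are reported asynchronously through the SDMA_COMP ring
 * that hfi1_file_mmap() below maps into the process.
 */
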
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = uctxt->rcvhdrq_size;
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffers need to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)
			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
			ret = -EPERM;
			goto done;
		}
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}

/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata, uctxt);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It is not ready to be
	 * removed until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
	*ev = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt);
	/*
	 * If a send context is allocated, reset context integrity
	 * checks to default and disable the send context.
	 */
	if (uctxt->sc) {
		set_pio_integrity(uctxt->sc);
		sc_disable(uctxt->sc);
	}

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;

	deallocate_ctxt(uctxt);
done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 *
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
	 * has been completed.
	 */
	ret = wait_event_interruptible(
		fd->uctxt->wait,
		!test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
	}

	if (ret) {
		/* Clear our in-use bit before dropping the context ref */
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
	}

	return ret;
}

static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	int ret;
	unsigned int swmajor, swminor;
	struct hfi1_ctxtdata *uctxt = NULL;
	struct hfi1_user_info uinfo;

	if (fd->uctxt)
		return -EINVAL;

	if (sizeof(uinfo) != len)
		return -EINVAL;

	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
		return -EFAULT;

	swmajor = uinfo.userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	swminor = uinfo.userversion & 0xffff;

	/*
	 * Acquire the mutex to protect against multiple creations of what
	 * could be a shared base context.
	 */
	mutex_lock(&hfi1_mutex);
	/*
	 * Get a sub context if available (fd->uctxt will be set).
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = find_sub_ctxt(fd, &uinfo);

	/*
	 * Allocate a base context if context sharing is not required or a
	 * sub context wasn't found.
	 */
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (uctxt->subctxt_cnt) {
			/*
			 * Base context is done (successfully or not), notify
			 * anybody using a sub-context that is waiting for
			 * this completion.
			 */
			clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
			wake_up(&uctxt->wait);
		}
		break;
	case 1:
		ret = complete_subctxt(fd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
		return 0;

	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* value of != 0 will return */
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	if (!dd->freectxts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "user ctxtdata allocation failed\n");
		return ret;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Setup sub context information if the user-level has requested
	 * sub contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper base context.
	 */
	if (uinfo->subctxt_cnt)
		init_subctxts(uctxt, uinfo);
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(uctxt);
	return ret;
}

static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
	mutex_lock(&hfi1_mutex);
	hfi1_stats.sps_ctxts--;
	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo)
{
	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;

	return ret;
}

static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}

Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001220static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001221{
1222 struct hfi1_ctxt_info cinfo;
Ira Weiny9e10af42015-10-30 18:58:40 -04001223 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001224
1225 if (sizeof(cinfo) != len)
1226 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001227
Dan Carpenterebe6b2e2015-09-16 09:42:25 +03001228 memset(&cinfo, 0, sizeof(cinfo));
Dean Luickbdf77522016-07-28 15:21:13 -04001229 cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
1230 HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
1231 HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
1232 HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
Dean Luick622c2022016-07-28 15:21:21 -04001233 /* adjust flag if this fd is not able to cache */
1234 if (!fd->handler)
1235 cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
1236
Mike Marciniszyn77241052015-07-30 15:17:43 -04001237 cinfo.num_active = hfi1_count_active_units();
1238 cinfo.unit = uctxt->dd->unit;
1239 cinfo.ctxt = uctxt->ctxt;
Ira Weiny9e10af42015-10-30 18:58:40 -04001240 cinfo.subctxt = fd->subctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001241 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1242 uctxt->dd->rcv_entries.group_size) +
1243 uctxt->expected_count;
1244 cinfo.credits = uctxt->sc->credits;
1245 cinfo.numa_node = uctxt->numa_id;
1246 cinfo.rec_cpu = fd->rec_cpu_num;
1247 cinfo.send_ctxt = uctxt->sc->hw_context;
1248
1249 cinfo.egrtids = uctxt->egrbufs.alloced;
1250 cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
1251 cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
Ira Weiny9e10af42015-10-30 18:58:40 -04001252 cinfo.sdma_ring_size = fd->cq->nentries;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001253 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1254
Ira Weiny9e10af42015-10-30 18:58:40 -04001255 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001256 if (copy_to_user((void __user *)arg, &cinfo, len))
1257 return -EFAULT;
Dean Luickbdf77522016-07-28 15:21:13 -04001258
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001259 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001260}
1261
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001262static int init_user_ctxt(struct hfi1_filedata *fd,
1263 struct hfi1_ctxtdata *uctxt)
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001264{
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001265 int ret;
1266
1267 ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
1268 if (ret)
1269 return ret;
1270
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001271 ret = hfi1_user_exp_rcv_init(fd, uctxt);
1272 if (ret)
1273 hfi1_user_sdma_free_queues(fd, uctxt);
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001274
1275 return ret;
1276}
1277
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	hfi1_init_ctxt(uctxt->sc);

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		return ret;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto setup_failed;

	/* If sub-contexts are enabled, do the appropriate setup */
	if (uctxt->subctxt_cnt)
		ret = setup_subctxt(uctxt);
	if (ret)
		goto setup_failed;

	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
	if (ret)
		goto setup_failed;

	ret = init_user_ctxt(fd, uctxt);
	if (ret)
		goto setup_failed;

	user_init(uctxt);

	/* Now that the context is set up, the fd can get a reference. */
	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 0;

setup_failed:
	/* Set the failed bit so sub-context init can do the right thing */
	set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
	deallocate_ctxt(uctxt);

	return ret;
}

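/*
 * A minimal sketch (illustration only, not driver code) of how a
 * sharing process waiting to attach a sub-context could react to the
 * HFI1_CTXT_BASE_FAILED bit set in the error path above; the real
 * check lives in the sub-context wait/attach path of this file.
 */
static int __maybe_unused example_subctxt_attach_check(struct hfi1_ctxtdata *uctxt)
{
	if (test_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags))
		return -ENOMEM;	/* base context setup failed; do not attach */
	return 0;
}
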
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int offset;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

	if (sizeof(binfo) != len)
		return -EINVAL;

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].dma);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}

	if (copy_to_user((void __user *)arg, &binfo, len))
		return -EFAULT;

	return 0;
}

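/*
 * Hypothetical user-space counterpart (illustration only): each
 * *_bufbase/addr field in binfo is an opaque token for mmap(2), so a
 * user library would map, say, the credit-return page roughly as
 *
 *	cr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, ctxt_fd,
 *		  binfo.sc_credits_addr);
 *
 * with hfi1_file_mmap() decoding the type/ctxt/subctxt/offset fields
 * that HFI1_MMAP_TOKEN() packed in above.
 */
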
/**
 * user_exp_rcv_setup - Set up the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * Wrapper to validate ioctl information before doing _rcv_setup.
 */
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, sizeof(tinfo)))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
	if (!ret) {
		/*
		 * Copy the number of tidlist entries we used
		 * and the length of the buffer we registered.
		 */
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			return -EFAULT;

		addr = arg + offsetof(struct hfi1_tid_info, length);
		if (copy_to_user((void __user *)addr, &tinfo.length,
				 sizeof(tinfo.length)))
			ret = -EFAULT;
	}

	return ret;
}

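/*
 * A minimal sketch (not driver code) of the write-back idiom used
 * above: on success only the kernel-produced output fields of the
 * ioctl argument are copied back via offsetof(), leaving the user's
 * input fields untouched. "example_tidcnt" is a hypothetical value.
 */
static int __maybe_unused example_copy_back_tidcnt(unsigned long arg,
						   u32 example_tidcnt)
{
	unsigned long addr = arg + offsetof(struct hfi1_tid_info, tidcnt);

	if (copy_to_user((void __user *)addr, &example_tidcnt,
			 sizeof(example_tidcnt)))
		return -EFAULT;
	return 0;
}
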
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

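/*
 * Hypothetical user-space counterpart (illustration only): a receive
 * loop blocks in poll(2) until poll_next() above reports data,
 * roughly
 *
 *	struct pollfd pfd = { .fd = ctxt_fd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		consume_rcvhdrq();
 *
 * where consume_rcvhdrq() stands in for the library's header-queue
 * consumer. poll_next() re-arms the receive-available interrupt only
 * when the header queue is empty, so a busy receiver stays in
 * user space.
 */
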
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use that is specific to send
 * buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	u16 ctxt;

	if (!dd->events)
		return -EINVAL;

	for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
		if (uctxt) {
			unsigned long *evs;
			int i;

			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			evs = dd->events + uctxt_offset(uctxt);
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
			hfi1_rcd_put(uctxt);
		}
	}

	return 0;
}

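/*
 * Layout note (informational): dd->events is a flat array of event
 * masks with one slot per (context, sub-context) pair, indexed as
 * uctxt_offset(uctxt) + subctxt. That is why the loop above sets the
 * base slot first and then slots 1..subctxt_cnt-1 for any shared
 * sub-contexts.
 */
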
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

/*
 * Clear the event notifier events for this context.
 * The user process then performs whatever actions are appropriate to
 * the bits having been set, if desired, and checks again in the
 * future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + uctxt_offset(uctxt) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}

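/*
 * Informational example: a user library acknowledges a notification
 * (e.g. HFI1_EVENT_FROZEN) by handing the bitmask back through the
 * ACK_EVENT ioctl path, which ends up here and clears those bits in
 * this sub-context's slot of dd->events.
 */
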
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
done:
	return ret;
}

static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true, &dd->kobj);
	if (ret)
		user_remove(dd);

	return ret;
}

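/*
 * Informational: assuming class_name() returns "hfi1" as elsewhere in
 * this driver, user_add() above creates device nodes named
 * "hfi1_<unit>", e.g. /dev/hfi1_0 for the first device.
 */
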
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	return user_add(dd);
}

/*
 * Remove per-unit files in /dev; returns void since the core kernel
 * reports no errors for this teardown.
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
}