/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static const struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};
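
/*
 * Each of the values above is carried in the TYPE field of the mmap token
 * built by HFI1_MMAP_TOKEN() below and decoded again in hfi1_file_mmap()
 * to select the region being mapped.
 */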

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
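
/*
 * Resulting 64-bit token layout (derived from the masks/shifts above):
 *   bits 63:32  MAGIC   (HFI1_MMAP_MAGIC)
 *   bits 27:24  TYPE    (enum mmap_types)
 *   bits 23:16  CTXT    (context number)
 *   bits 15:12  SUBCTXT (sub-context number)
 *   bits 11:0   OFFSET  (offset of addr within its page)
 */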

#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* Just take a ref now. Not all opens result in a context assign */
	kobject_get(&dd->kobj);

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (fd) {
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		mmgrab(fd->mm);
		fd->dd = dd;
		fp->private_data = fd;
	} else {
		fp->private_data = NULL;

		if (atomic_dec_and_test(&dd->user_refcount))
			complete(&dd->user_comp);

		return -ENOMEM;
	}

	return 0;
}

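/*
 * Main ioctl entry point for user contexts.  Every command except
 * ASSIGN_CTXT and GET_VERS requires that a receive context has already
 * been assigned to this file descriptor (fd->uctxt != NULL).
 */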
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_tid_info tinfo;
	int ret = 0;
	unsigned long addr;
	int uval = 0;
	unsigned long ul_uval = 0;
	u16 uval16 = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt)
			sc_return_credits(uctxt->sc);
		break;

	case HFI1_IOCTL_TID_UPDATE:
		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_FREE:
		ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_INVAL_READ:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;

	case HFI1_IOCTL_RECV_CTRL:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = manage_rcvq(uctxt, fd->subctxt, uval);
		break;

	case HFI1_IOCTL_POLL_TYPE:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;

	case HFI1_IOCTL_ACK_EVENT:
		ret = get_user(ul_uval, (unsigned long __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
		break;

	case HFI1_IOCTL_SET_PKEY:
		ret = get_user(uval16, (u16 __user *)arg);
		if (ret != 0)
			return -EFAULT;
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
		else
			return -EPERM;
		break;

	case HFI1_IOCTL_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc)
			return -EINVAL;

		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED))
			return -ENOLCK;

		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

			if (dd->flags & HFI1_FORCED_FREEZE)
				/*
				 * Don't allow context reset if we are in
				 * a forced freeze.
				 */
				return -ENODEV;

			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}

	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

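/*
 * Submit SDMA requests described by the caller's iovec array.  Each loop
 * iteration hands a group of iovecs to hfi1_user_sdma_process_request();
 * the return value is the number of requests queued, or a negative errno.
 */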
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq)
		return -EIO;

	if (!iter_is_iovec(from) || !dim)
		return -EINVAL;

	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
		return -ENOSPC;

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	return reqs;
}

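/*
 * Map one of the regions described by enum mmap_types into user space.
 * The vm_pgoff passed in by user space is the mmap token that was built
 * with HFI1_MMAP_TOKEN() in get_base_info().
 */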
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = uctxt->rcvhdrq_size;
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)
			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
			ret = -EPERM;
			goto done;
		}
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}

/*
 * Local (non-chip) user memory is not mapped right away; it is faulted
 * in on demand as the user-level code accesses it.
 */
static int vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata, uctxt);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It is not ready to be
	 * removed until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
	*ev = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt);
	/*
	 * If a send context is allocated, reset context integrity
	 * checks to default and disable the send context.
	 */
	if (uctxt->sc) {
		set_pio_integrity(uctxt->sc);
		sc_disable(uctxt->sc);
	}

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;

	deallocate_ctxt(uctxt);
done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
	 * has been completed.
	 */
	ret = wait_event_interruptible(
		fd->uctxt->wait,
		!test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
	}

	if (ret) {
		/*
		 * Clear the sub-context's in-use bit while fd->uctxt is
		 * still valid, then drop the context reference.
		 */
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
	}

	return ret;
}

static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	int ret;
	unsigned int swmajor, swminor;
	struct hfi1_ctxtdata *uctxt = NULL;
	struct hfi1_user_info uinfo;

	if (fd->uctxt)
		return -EINVAL;

	if (sizeof(uinfo) != len)
		return -EINVAL;

	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
		return -EFAULT;

	swmajor = uinfo.userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	swminor = uinfo.userversion & 0xffff;

	/*
	 * Acquire the mutex to protect against multiple creations of what
	 * could be a shared base context.
	 */
	mutex_lock(&hfi1_mutex);
	/*
	 * Get a sub context if available (fd->uctxt will be set).
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = find_sub_ctxt(fd, &uinfo);

	/*
	 * Allocate a base context if context sharing is not required or a
	 * sub context wasn't found.
	 */
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (uctxt->subctxt_cnt) {
			/*
			 * Base context is done (successfully or not), notify
			 * anybody using a sub-context that is waiting for
			 * this completion.
			 */
			clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
			wake_up(&uctxt->wait);
		}
		break;
	case 1:
		ret = complete_subctxt(fd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
		return 0;

	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* a non-zero value (match or error) ends the search */
			if (ret)
				return ret;
		}
	}

	return 0;
}

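/*
 * Allocate a new base user context: pick a NUMA node based on the
 * process's CPU affinity, create the receive context data, then allocate
 * and enable a PIO send context for it.  Sub-context bookkeeping is also
 * initialized here when context sharing was requested.
 */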
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry successfully in a short
		 * while.
		 */
		return -EIO;
	}

	if (!dd->freectxts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "user ctxtdata allocation failed\n");
		return ret;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Set up sub-context information if user level has requested
	 * sub-contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper base context.
	 */
	if (uinfo->subctxt_cnt)
		init_subctxts(uctxt, uinfo);
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(uctxt);
	return ret;
}

static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
	mutex_lock(&hfi1_mutex);
	hfi1_stats.sps_ctxts--;
	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo)
{
	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;

	return ret;
}

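/*
 * user_init - program the receive-side controls for a new user context.
 * The enable bits are chosen from the context's capability flags and the
 * context is then enabled for receive via hfi1_rcvctrl().
 */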
static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when it changes (and when the update bit transitions from
	 * 0 to 1), we turn the feature off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packets per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}

static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	if (sizeof(cinfo) != len)
		return -EINVAL;

	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
				HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
			HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
			HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
	/* adjust flag if this fd is not able to cache */
	if (!fd->handler)
		cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user((void __user *)arg, &cinfo, len))
		return -EFAULT;

	return 0;
}

static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt)
{
	int ret;

	ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
	if (ret)
		return ret;

	ret = hfi1_user_exp_rcv_init(fd, uctxt);
	if (ret)
		hfi1_user_sdma_free_queues(fd, uctxt);

	return ret;
}

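/*
 * Finish bringing up a freshly allocated base context: create the receive
 * header queue and eager buffers, set up the optional sub-context areas,
 * claim the receive-array groups, and finally attach the file descriptor
 * to the context.  On failure the HFI1_CTXT_BASE_FAILED bit is set so any
 * waiting sub-context can bail out, and the context is deallocated.
 */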
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	hfi1_init_ctxt(uctxt->sc);

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		return ret;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto setup_failed;

	/* If sub-contexts are enabled, do the appropriate setup */
	if (uctxt->subctxt_cnt)
		ret = setup_subctxt(uctxt);
	if (ret)
		goto setup_failed;

	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
	if (ret)
		goto setup_failed;

	ret = init_user_ctxt(fd, uctxt);
	if (ret)
		goto setup_failed;

	user_init(uctxt);

	/* Now that the context is set up, the fd can get a reference. */
	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 0;

setup_failed:
	/* Set the failed bit so sub-context init can do the right thing */
	set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
	deallocate_ctxt(uctxt);

	return ret;
}
1315
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned offset;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

	if (sizeof(binfo) != len)
		return -EINVAL;

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
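	/*
	 * Hypothetical example: with 4 KiB pages, a hw_free address of
	 * 0x...2100 and a per-NUMA credit-return base of 0x...1000 give
	 * (0x2100 - 0x1000) % 0x1000 = 0x100 as the in-page offset.
	 */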
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].dma);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}

	if (copy_to_user((void __user *)arg, &binfo, len))
		return -EFAULT;

	return 0;
}

/**
 * user_exp_rcv_setup - Set up the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * Wrapper to validate ioctl information before doing _rcv_setup.
 */
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
	if (!ret) {
		/*
		 * Copy the number of tidlist entries we used
		 * and the length of the buffer we registered.
		 */
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			return -EFAULT;

		addr = arg + offsetof(struct hfi1_tid_info, length);
		if (copy_to_user((void __user *)addr, &tinfo.length,
				 sizeof(tinfo.length)))
			ret = -EFAULT;
	}

	return ret;
}

/**
 * user_exp_rcv_clear - Clear the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * hfi1_user_exp_rcv_clear() can be called from the error path.  Because
 * of this, we need to use this wrapper to copy the user space information
 * before doing the clear.
 */
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
	if (!ret) {
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			return -EFAULT;
	}

	return ret;
}

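/*
 * Poll for an urgent packet: report POLLIN | POLLRDNORM when the urgent
 * count has advanced since the last poll, otherwise arm the
 * HFI1_CTXT_WAITING_URG flag so the interrupt handler can wake us later.
 */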
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

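/*
 * Poll for the next packet: if the RcvHdr queue is empty, re-enable the
 * receive-available interrupt and record that we are waiting; otherwise
 * report the context as readable.
 */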
static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	u16 ctxt;

	if (!dd->events)
		return -EINVAL;

	for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
		if (uctxt) {
			unsigned long *evs;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			evs = dd->events + uctxt_offset(uctxt);
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
			hfi1_rcd_put(uctxt);
		}
	}

	return 0;
}

/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

/*
 * Clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + uctxt_offset(uctxt) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}

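/*
 * Set the partition key for this context.  Management pkeys are
 * rejected, and the requested pkey must already be present in the
 * port's pkey table before it is programmed into the hardware.
 */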
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
done:
	return ret;
}

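/* Tear down the user char device for this unit. */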
static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

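/*
 * Create the per-unit user char device (e.g. /dev/hfi1_0); undo the
 * setup if cdev creation fails.
 */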
static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true, &dd->kobj);
	if (ret)
		user_remove(dd);

	return ret;
}

/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	return user_add(dd);
}

/*
 * Remove per-unit files in /dev
 * Void; the core kernel returns no errors for this.
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
}