/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
			 __u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static const struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
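
/*
 * Illustrative layout note (derived only from the shift/mask
 * definitions above): a token is a 64-bit value arranged as
 *
 *   63         32 31    28 27  24 23    16 15     12 11         0
 *  +-------------+--------+------+--------+---------+------------+
 *  |    MAGIC    | unused | TYPE |  CTXT  | SUBCTXT |   OFFSET   |
 *  +-------------+--------+------+--------+---------+------------+
 *
 * The driver hands these tokens to user space (see get_base_info()),
 * and user space passes one back as the mmap() offset so the driver
 * can tell which resource is being mapped.
 */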

#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* Just take a ref now. Not all opens result in a context assign */
	kobject_get(&dd->kobj);

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (fd) {
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		mmgrab(fd->mm);
		fd->dd = dd;
		fp->private_data = fd;
	} else {
		fp->private_data = NULL;

		if (atomic_dec_and_test(&dd->user_refcount))
			complete(&dd->user_comp);

		return -ENOMEM;
	}

	return 0;
}

static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_tid_info tinfo;
	int ret = 0;
	unsigned long addr;
	int uval = 0;
	unsigned long ul_uval = 0;
	u16 uval16 = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fd, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_base_info));
		break;
	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt)
			sc_return_credits(uctxt->sc);
		break;

	case HFI1_IOCTL_TID_UPDATE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 */
			addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt)))
				return -EFAULT;

			addr = arg + offsetof(struct hfi1_tid_info, length);
			if (copy_to_user((void __user *)addr, &tinfo.length,
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;

	case HFI1_IOCTL_TID_FREE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;

	case HFI1_IOCTL_TID_INVAL_READ:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;

	case HFI1_IOCTL_RECV_CTRL:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = manage_rcvq(uctxt, fd->subctxt, uval);
		break;

	case HFI1_IOCTL_POLL_TYPE:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;

	case HFI1_IOCTL_ACK_EVENT:
		ret = get_user(ul_uval, (unsigned long __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
		break;

	case HFI1_IOCTL_SET_PKEY:
		ret = get_user(uval16, (u16 __user *)arg);
		if (ret != 0)
			return -EFAULT;
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
		else
			return -EPERM;
		break;

	case HFI1_IOCTL_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc)
			return -EINVAL;

		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED))
			return -ENOLCK;

		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

			if (dd->flags & HFI1_FORCED_FREEZE)
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				return -ENODEV;

			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}

	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq)
		return -EIO;

	if (!iter_is_iovec(from) || !dim)
		return -EINVAL;

	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
		return -ENOSPC;

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	return reqs;
}

static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = uctxt->rcvhdrq_size;
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)
			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
			ret = -EPERM;
			goto done;
		}
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}

/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata, uctxt);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It is not ready to be
	 * removed until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
	*ev = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt);
	/*
	 * If a send context is allocated, reset context integrity
	 * checks to default and disable the send context.
	 */
	if (uctxt->sc) {
		set_pio_integrity(uctxt->sc);
		sc_disable(uctxt->sc);
	}

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;

	deallocate_ctxt(uctxt);
done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 *
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
	 * has been completed.
	 */
	ret = wait_event_interruptible(
		fd->uctxt->wait,
		!test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
	}

	if (ret) {
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
	}

	return ret;
}

static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	int ret;
	unsigned int swmajor, swminor;
	struct hfi1_ctxtdata *uctxt = NULL;
	struct hfi1_user_info uinfo;

	if (fd->uctxt)
		return -EINVAL;

	if (sizeof(uinfo) != len)
		return -EINVAL;

	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
		return -EFAULT;

	swmajor = uinfo.userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	swminor = uinfo.userversion & 0xffff;

	/*
	 * Acquire the mutex to protect against multiple creations of what
	 * could be a shared base context.
	 */
	mutex_lock(&hfi1_mutex);
	/*
	 * Get a sub context if available  (fd->uctxt will be set).
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = find_sub_ctxt(fd, &uinfo);

	/*
	 * Allocate a base context if context sharing is not required or a
	 * sub context wasn't found.
	 */
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (uctxt->subctxt_cnt) {
			/*
			 * Base context is done (successfully or not), notify
			 * anybody using a sub-context that is waiting for
			 * this completion.
			 */
			clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
			wake_up(&uctxt->wait);
		}
		break;
	case 1:
		ret = complete_subctxt(fd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
		return 0;

	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* value of != 0 will return */
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	if (!dd->freectxts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "user ctxtdata allocation failed\n");
		return ret;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Setup sub context information if the user-level has requested
	 * sub contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper base context.
	 */
	if (uinfo->subctxt_cnt)
		init_subctxts(uctxt, uinfo);
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(uctxt);
	return ret;
}

static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
	mutex_lock(&hfi1_mutex);
	hfi1_stats.sps_ctxts--;
	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo)
{
	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;

	return ret;
}

static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}
1238
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001239static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001240{
1241 struct hfi1_ctxt_info cinfo;
Ira Weiny9e10af42015-10-30 18:58:40 -04001242 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001243
1244 if (sizeof(cinfo) != len)
1245 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001246
Dan Carpenterebe6b2e2015-09-16 09:42:25 +03001247 memset(&cinfo, 0, sizeof(cinfo));
Dean Luickbdf77522016-07-28 15:21:13 -04001248 cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
1249 HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
1250 HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
1251 HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
Dean Luick622c2022016-07-28 15:21:21 -04001252 /* adjust flag if this fd is not able to cache */
1253 if (!fd->handler)
1254 cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
1255
Mike Marciniszyn77241052015-07-30 15:17:43 -04001256 cinfo.num_active = hfi1_count_active_units();
1257 cinfo.unit = uctxt->dd->unit;
1258 cinfo.ctxt = uctxt->ctxt;
Ira Weiny9e10af42015-10-30 18:58:40 -04001259 cinfo.subctxt = fd->subctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001260 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1261 uctxt->dd->rcv_entries.group_size) +
1262 uctxt->expected_count;
1263 cinfo.credits = uctxt->sc->credits;
1264 cinfo.numa_node = uctxt->numa_id;
1265 cinfo.rec_cpu = fd->rec_cpu_num;
1266 cinfo.send_ctxt = uctxt->sc->hw_context;
1267
1268 cinfo.egrtids = uctxt->egrbufs.alloced;
1269 cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
1270 cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
Ira Weiny9e10af42015-10-30 18:58:40 -04001271 cinfo.sdma_ring_size = fd->cq->nentries;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001272 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1273
Ira Weiny9e10af42015-10-30 18:58:40 -04001274 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001275 if (copy_to_user((void __user *)arg, &cinfo, len))
1276 return -EFAULT;
Dean Luickbdf77522016-07-28 15:21:13 -04001277
Michael J. Ruhlff1a5582017-09-26 07:03:57 -07001278 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001279}
1280
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001281static int init_user_ctxt(struct hfi1_filedata *fd,
1282 struct hfi1_ctxtdata *uctxt)
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001283{
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001284 int ret;
1285
1286 ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
1287 if (ret)
1288 return ret;
1289
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001290 ret = hfi1_user_exp_rcv_init(fd, uctxt);
1291 if (ret)
1292 hfi1_user_sdma_free_queues(fd, uctxt);
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001293
1294 return ret;
1295}
1296
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001297static int setup_base_ctxt(struct hfi1_filedata *fd,
1298 struct hfi1_ctxtdata *uctxt)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001299{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001300 struct hfi1_devdata *dd = uctxt->dd;
1301 int ret = 0;
1302
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001303 hfi1_init_ctxt(uctxt->sc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001304
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001305 /* Now allocate the RcvHdr queue and eager buffers. */
1306 ret = hfi1_create_rcvhdrq(dd, uctxt);
Mitko Haralanov94158442016-04-20 06:05:36 -07001307 if (ret)
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001308 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001309
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001310 ret = hfi1_setup_eagerbufs(uctxt);
1311 if (ret)
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001312 goto setup_failed;
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001313
1314 /* If sub-contexts are enabled, do the appropriate setup */
1315 if (uctxt->subctxt_cnt)
1316 ret = setup_subctxt(uctxt);
1317 if (ret)
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001318 goto setup_failed;
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001319
Mike Marciniszyn9c1a99c32017-06-09 15:59:40 -07001320 ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001321 if (ret)
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001322 goto setup_failed;
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001323
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001324 ret = init_user_ctxt(fd, uctxt);
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001325 if (ret)
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001326 goto setup_failed;
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001327
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001328 user_init(uctxt);
1329
Michael J. Ruhle87473b2017-07-29 08:43:32 -07001330 /* Now that the context is set up, the fd can get a reference. */
1331 fd->uctxt = uctxt;
1332 hfi1_rcd_get(uctxt);
1333
Michael J. Ruhl62239fc2017-05-04 05:15:21 -07001334 return 0;
1335
1336setup_failed:
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001337 /* Set the failed bit so sub-context init can do the right thing */
Michael J. Ruhlf2a3bc02017-08-04 13:52:38 -07001338 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
1339 deallocate_ctxt(uctxt);
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001340
Mike Marciniszyn77241052015-07-30 15:17:43 -04001341 return ret;
1342}
1343
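/*
 * get_base_info() - fill a struct hfi1_base_info with the mmap tokens
 * for this context/sub-context (PIO credit return, PIO buffers, RcvHdr
 * queue, eager buffers, SDMA completion ring, user registers, events,
 * status and, for shared contexts, the sub-context regions) and copy up
 * to @len bytes of it to user space.
 *
 * Rough userspace sketch (illustrative only; assumes the
 * HFI1_IOCTL_USER_INFO command and that each token is passed to mmap()
 * as the file offset, with a region-appropriate length that is not
 * shown here):
 *
 *	struct hfi1_base_info binfo;
 *	void *creds;
 *
 *	ioctl(fd, HFI1_IOCTL_USER_INFO, &binfo);
 *	creds = mmap(NULL, length, PROT_READ, MAP_SHARED, fd,
 *		     binfo.sc_credits_addr);
 */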
Michael J. Ruhl5042cdd2017-05-04 05:14:45 -07001344static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
1345 __u32 len)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001346{
1347 struct hfi1_base_info binfo;
Ira Weiny9e10af42015-10-30 18:58:40 -04001348 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001349 struct hfi1_devdata *dd = uctxt->dd;
1350 ssize_t sz;
1351 unsigned offset;
1352 int ret = 0;
1353
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -07001354 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001355
1356 memset(&binfo, 0, sizeof(binfo));
1357 binfo.hw_version = dd->revision;
1358 binfo.sw_version = HFI1_KERN_SWVERSION;
1359 binfo.bthqp = kdeth_qp;
1360 binfo.jkey = uctxt->jkey;
1361 /*
1362 * If more than 64 contexts are enabled the allocated credit
1363 * return will span two or three contiguous pages. Since we only
1364 * map the page containing the context's credit return address,
1365 * we need to calculate the offset in the proper page.
1366 */
1367 offset = ((u64)uctxt->sc->hw_free -
1368 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
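	/*
	 * Illustrative arithmetic only (the slot size is an assumption,
	 * not the driver's actual layout): with 4 KiB pages and, say,
	 * 64-byte credit-return slots, context 70's slot starts at byte
	 * 4480, i.e. 384 bytes into the second page; the modulo above
	 * recovers that 384-byte offset within the single page that
	 * gets mapped.
	 */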
1369 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001370 fd->subctxt, offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001371 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001372 fd->subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001373 uctxt->sc->base_addr);
1374 binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
1375 uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001376 fd->subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001377 uctxt->sc->base_addr);
1378 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001379 fd->subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001380 uctxt->rcvhdrq);
1381 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
Ira Weiny9e10af42015-10-30 18:58:40 -04001382 fd->subctxt,
Tymoteusz Kielan60368182016-09-06 04:35:54 -07001383 uctxt->egrbufs.rcvtids[0].dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001384 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001385 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001386 /*
1387 * user regs are at
1388 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
1389 */
1390 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001391 fd->subctxt, 0);
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001392 offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
1393 sizeof(*dd->events));
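	/*
	 * For example (illustrative numbers only): with 8-byte event
	 * entries and a combined index of 600 the byte offset is 4800,
	 * which offset_in_page() reduces to 704 within the page holding
	 * this context's event flags.
	 */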
Mike Marciniszyn77241052015-07-30 15:17:43 -04001394 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001395 fd->subctxt,
1396 offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001397 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001398 fd->subctxt,
1399 dd->status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001400 if (HFI1_CAP_IS_USET(DMA_RTAIL))
1401 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001402 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001403 if (uctxt->subctxt_cnt) {
1404 binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001405 uctxt->ctxt,
1406 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001407 binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001408 uctxt->ctxt,
1409 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001410 binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
Michael J. Ruhl033c16d2017-09-26 07:04:03 -07001411 uctxt->ctxt,
1412 fd->subctxt, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001413 }
1414 sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
1415 if (copy_to_user(ubase, &binfo, sz))
1416 ret = -EFAULT;
1417 return ret;
1418}
1419
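/*
 * poll_urgent() - poll for urgent packets.  Report POLLIN | POLLRDNORM
 * once the context's urgent packet count has advanced past the value
 * seen at the previous poll; otherwise arm HFI1_CTXT_WAITING_URG so the
 * receive interrupt handler knows to wake the waiter.
 */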
1420static unsigned int poll_urgent(struct file *fp,
1421 struct poll_table_struct *pt)
1422{
Ira Weiny9e10af42015-10-30 18:58:40 -04001423 struct hfi1_filedata *fd = fp->private_data;
1424 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001425 struct hfi1_devdata *dd = uctxt->dd;
1426 unsigned pollflag;
1427
1428 poll_wait(fp, &uctxt->wait, pt);
1429
1430 spin_lock_irq(&dd->uctxt_lock);
1431 if (uctxt->urgent != uctxt->urgent_poll) {
1432 pollflag = POLLIN | POLLRDNORM;
1433 uctxt->urgent_poll = uctxt->urgent;
1434 } else {
1435 pollflag = 0;
1436 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1437 }
1438 spin_unlock_irq(&dd->uctxt_lock);
1439
1440 return pollflag;
1441}
1442
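/*
 * poll_next() - poll for newly arrived packets.  If the receive header
 * queue is empty, set HFI1_CTXT_WAITING_RCV and re-enable the receive
 * available interrupt before reporting not-ready; otherwise report
 * POLLIN | POLLRDNORM.
 */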
1443static unsigned int poll_next(struct file *fp,
1444 struct poll_table_struct *pt)
1445{
Ira Weiny9e10af42015-10-30 18:58:40 -04001446 struct hfi1_filedata *fd = fp->private_data;
1447 struct hfi1_ctxtdata *uctxt = fd->uctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001448 struct hfi1_devdata *dd = uctxt->dd;
1449 unsigned pollflag;
1450
1451 poll_wait(fp, &uctxt->wait, pt);
1452
1453 spin_lock_irq(&dd->uctxt_lock);
1454 if (hdrqempty(uctxt)) {
1455 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
Michael J. Ruhl22505632017-07-24 07:46:06 -07001456 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001457 pollflag = 0;
Jubin Johne4909742016-02-14 20:22:00 -08001458 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04001459 pollflag = POLLIN | POLLRDNORM;
Jubin Johne4909742016-02-14 20:22:00 -08001460 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04001461 spin_unlock_irq(&dd->uctxt_lock);
1462
1463 return pollflag;
1464}
1465
1466/*
1467 * Find all user contexts in use, and set the specified bit in their
1468 * event mask.
 1469 * See also find_ctxt() for a similar use that is specific to send buffers.
1470 */
1471int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
1472{
1473 struct hfi1_ctxtdata *uctxt;
1474 struct hfi1_devdata *dd = ppd->dd;
Michael J. Ruhle6f76222017-07-24 07:45:55 -07001475 u16 ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001476
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001477 if (!dd->events)
1478 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001479
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07001480 for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001481 ctxt++) {
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001482 uctxt = hfi1_rcd_get_by_index(dd, ctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001483 if (uctxt) {
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001484 unsigned long *evs;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001485 int i;
1486 /*
 1487			 * subctxt_cnt is 0 if the context is not shared; set the
 1488			 * base context's bit first, then one bit per sub-context, if any
1489 */
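			/*
			 * e.g. subctxt_cnt == 3: set the bit in evs[0]
			 * (base context) and in evs[1] and evs[2].
			 */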
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001490 evs = dd->events + uctxt_offset(uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001491 set_bit(evtbit, evs);
1492 for (i = 1; i < uctxt->subctxt_cnt; i++)
1493 set_bit(evtbit, evs + i);
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001494 hfi1_rcd_put(uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001495 }
1496 }
Michael J. Ruhld295dbe2017-08-04 13:52:44 -07001497
1498 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001499}
1500
1501/**
1502 * manage_rcvq - manage a context's receive queue
1503 * @uctxt: the context
1504 * @subctxt: the sub-context
1505 * @start_stop: action to carry out
1506 *
1507 * start_stop == 0 disables receive on the context, for use in queue
 1508 * overflow conditions. start_stop == 1 re-enables, to be used to
1509 * re-init the software copy of the head register
1510 */
Michael J. Ruhl8737ce92017-05-04 05:15:15 -07001511static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001512 int start_stop)
1513{
1514 struct hfi1_devdata *dd = uctxt->dd;
1515 unsigned int rcvctrl_op;
1516
1517 if (subctxt)
1518 goto bail;
 1519	/* atomically set or clear the context's receive enable */
1520 if (start_stop) {
1521 /*
1522 * On enable, force in-memory copy of the tail register to
1523 * 0, so that protocol code doesn't have to worry about
1524 * whether or not the chip has yet updated the in-memory
1525 * copy or not on return from the system call. The chip
 1526	 * always resets its tail register back to 0 on a
1527 * transition from disabled to enabled.
1528 */
1529 if (uctxt->rcvhdrtail_kvaddr)
1530 clear_rcvhdrtail(uctxt);
1531 rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
Jubin Johne4909742016-02-14 20:22:00 -08001532 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04001533 rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
Jubin Johne4909742016-02-14 20:22:00 -08001534 }
Michael J. Ruhl22505632017-07-24 07:46:06 -07001535 hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001536 /* always; new head should be equal to new tail; see above */
1537bail:
1538 return 0;
1539}
1540
1541/*
1542 * clear the event notifier events for this context.
 1543 * The user process then performs whatever actions are appropriate to the
 1544 * bits that were set, if desired, and checks again in the future.
1545 */
Michael J. Ruhl8737ce92017-05-04 05:15:15 -07001546static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001547 unsigned long events)
1548{
1549 int i;
1550 struct hfi1_devdata *dd = uctxt->dd;
1551 unsigned long *evs;
1552
1553 if (!dd->events)
1554 return 0;
1555
Michael J. Ruhl21e5acc2017-09-26 07:00:56 -07001556 evs = dd->events + uctxt_offset(uctxt) + subctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001557
1558 for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
1559 if (!test_bit(i, &events))
1560 continue;
1561 clear_bit(i, evs);
1562 }
1563 return 0;
1564}
1565
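/*
 * set_ctxt_pkey() - program a context's partition key.  The limited and
 * full management pkeys are rejected outright; any other pkey must
 * already be present in the port's pkey table, in which case it is
 * written to the context, otherwise -ENOENT is returned.
 */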
Michael J. Ruhl8737ce92017-05-04 05:15:15 -07001566static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001567{
1568 int ret = -ENOENT, i, intable = 0;
1569 struct hfi1_pportdata *ppd = uctxt->ppd;
1570 struct hfi1_devdata *dd = uctxt->dd;
1571
1572 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
1573 ret = -EINVAL;
1574 goto done;
1575 }
1576
1577 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
1578 if (pkey == ppd->pkeys[i]) {
1579 intable = 1;
1580 break;
1581 }
1582
1583 if (intable)
Michael J. Ruhl17573972017-07-24 07:46:01 -07001584 ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001585done:
1586 return ret;
1587}
1588
Mike Marciniszyn77241052015-07-30 15:17:43 -04001589static void user_remove(struct hfi1_devdata *dd)
1590{
1592 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001593}
1594
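/*
 * user_add() - create the per-unit user char device (typically
 * /dev/hfi1_<unit>); if creation fails, whatever was partially set up
 * is cleaned up again via user_remove().
 */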
1595static int user_add(struct hfi1_devdata *dd)
1596{
1597 char name[10];
1598 int ret;
1599
Mike Marciniszyn77241052015-07-30 15:17:43 -04001600 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
Dennis Dalessandro0eb62652016-05-19 05:25:50 -07001601 ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
Ira Weinye116a642015-09-17 13:47:49 -04001602 &dd->user_cdev, &dd->user_device,
Dennis Dalessandroe11ffbd2016-05-19 05:26:44 -07001603 true, &dd->kobj);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001604 if (ret)
Dennis Dalessandro7312f292016-05-19 05:25:57 -07001605 user_remove(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001606
Mike Marciniszyn77241052015-07-30 15:17:43 -04001607 return ret;
1608}
1609
1610/*
1611 * Create per-unit files in /dev
1612 */
1613int hfi1_device_create(struct hfi1_devdata *dd)
1614{
Dennis Dalessandro0f7b1f92016-05-19 05:26:10 -07001615 return user_add(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001616}
1617
1618/*
1619 * Remove per-unit files in /dev
 1620 * Returns void; the core kernel code returns no errors for this teardown.
1621 */
1622void hfi1_device_remove(struct hfi1_devdata *dd)
1623{
1624 user_remove(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001625}