/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"

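/* open(): allocate a new SCIF endpoint and stash it in the file's private data */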
static int scif_fdopen(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = scif_open();

	if (!priv)
		return -ENOMEM;
	f->private_data = priv;
	return 0;
}

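/* release(): close the SCIF endpoint associated with this file */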
static int scif_fdclose(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = f->private_data;

	return scif_close(priv);
}

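/* poll(): report endpoint readiness via the common SCIF poll helper */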
static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
{
	struct scif_endpt *priv = f->private_data;

	return __scif_pollfd(f, wait, priv);
}

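/* flush(): wake up a listening endpoint whose owning fd is being closed */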
static int scif_fdflush(struct file *f, fl_owner_t id)
{
	struct scif_endpt *ep = f->private_data;

	spin_lock(&ep->lock);
	/*
	 * The listening endpoint stashes the open file information before
	 * waiting for incoming connections. If the application closes the
	 * endpoint while a separate thread is waiting for incoming
	 * connections, the release callback is never invoked, since the
	 * accept IOCTL bumps the file descriptor reference count. Call the
	 * flush routine if the id matches the endpoint's open file
	 * information so that the listening endpoint can be woken up and
	 * the fd released.
	 */
	if (ep->files == id)
		__scif_flush(ep);
	spin_unlock(&ep->lock);
	return 0;
}

static __always_inline void scif_err_debug(int err, const char *str)
{
	/*
	 * ENOTCONN is a common, uninteresting error which would otherwise
	 * flood the console with debug messages.
	 */
	if (err < 0 && err != -ENOTCONN)
		dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
}

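/*
 * ioctl() entry point: dispatch the SCIF user space requests (bind, listen,
 * connect, the two-step accept, messaging and node ID queries) to the
 * corresponding endpoint operations.
 */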
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct scif_endpt *priv = f->private_data;
	void __user *argp = (void __user *)arg;
	int err = 0;
	struct scifioctl_msg request;
	bool non_block = false;

	non_block = !!(f->f_flags & O_NONBLOCK);

	switch (cmd) {
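	/*
	 * SCIF_BIND assigns a local port to the endpoint and returns the port
	 * that was actually bound; SCIF_LISTEN marks the endpoint as a
	 * listening endpoint with the requested backlog.
	 */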
	case SCIF_BIND:
	{
		int pn;

		if (copy_from_user(&pn, argp, sizeof(pn)))
			return -EFAULT;

		pn = scif_bind(priv, pn);
		if (pn < 0)
			return pn;

		if (copy_to_user(argp, &pn, sizeof(pn)))
			return -EFAULT;

		return 0;
	}
	case SCIF_LISTEN:
		return scif_listen(priv, arg);
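	/*
	 * SCIF_CONNECT initiates a connection to a remote listening port and
	 * reports the local port that was used back through req.self.
	 */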
	case SCIF_CONNECT:
	{
		struct scifioctl_connect req;
		struct scif_endpt *ep = (struct scif_endpt *)priv;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;

		err = __scif_connect(priv, &req.peer, non_block);
		if (err < 0)
			return err;

		req.self.node = ep->port.node;
		req.self.port = ep->port.port;

		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;

		return 0;
	}
	/*
	 * Accept is done in two halves. The request ioctl does the basic
	 * functionality of accepting the request and returning the information
	 * about it including the internal ID of the end point. The register
	 * is done with the internal ID on a new file descriptor opened by the
	 * requesting process.
	 */
	case SCIF_ACCEPTREQ:
	{
		struct scifioctl_accept request;
		scif_epd_t *ep = (scif_epd_t *)&request.endpt;

		if (copy_from_user(&request, argp, sizeof(request)))
			return -EFAULT;

		err = scif_accept(priv, &request.peer, ep, request.flags);
		if (err < 0)
			return err;

		if (copy_to_user(argp, &request, sizeof(request))) {
			scif_close(*ep);
			return -EFAULT;
		}
		/*
		 * Add to the list of user mode eps where the second half
		 * of the accept is not yet completed.
		 */
		spin_lock(&scif_info.eplock);
		list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
		list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
		(*ep)->listenep = priv;
		priv->acceptcnt++;
		spin_unlock(&scif_info.eplock);

		return 0;
	}
	case SCIF_ACCEPTREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scif_endpt *newep;
		struct scif_endpt *lisep;
		struct scif_endpt *fep = NULL;
		struct scif_endpt *tmpep;
		struct list_head *pos, *tmpq;

		/* Finally replace the pointer to the accepted endpoint */
		if (copy_from_user(&newep, argp, sizeof(void *)))
			return -EFAULT;

		/* Remove from the user accept queue */
		spin_lock(&scif_info.eplock);
		list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, miacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				fep = tmpep;
				break;
			}
		}

		if (!fep) {
			spin_unlock(&scif_info.eplock);
			return -ENOENT;
		}

		lisep = newep->listenep;
		list_for_each_safe(pos, tmpq, &lisep->li_accept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, liacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				lisep->acceptcnt--;
				break;
			}
		}

		spin_unlock(&scif_info.eplock);

		/* Free the resources automatically created from the open. */
		scif_anon_inode_fput(priv);
		scif_teardown_ep(priv);
		scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
		f->private_data = newep;
		return 0;
	}
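	/*
	 * SCIF_SEND and SCIF_RECV copy the scifioctl_msg descriptor from user
	 * space, perform the message transfer on the endpoint and report the
	 * number of bytes actually transferred back through out_len.
	 */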
	case SCIF_SEND:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto send_err;
		}
		err = scif_user_send(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto send_err;
		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto send_err;
		}
		err = 0;
send_err:
		scif_err_debug(err, "scif_send");
		return err;
	}
	case SCIF_RECV:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto recv_err;
		}

		err = scif_user_recv(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto recv_err;

		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = 0;
recv_err:
		scif_err_debug(err, "scif_recv");
		return err;
	}
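	/*
	 * SCIF_GET_NODEIDS returns the IDs of the online SCIF nodes: at most
	 * node_ids.len entries are copied to the user supplied buffer and the
	 * ID of the local node is stored through node_ids.self.
	 */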
	case SCIF_GET_NODEIDS:
	{
		struct scifioctl_node_ids node_ids;
		int entries;
		u16 *nodes;
		void __user *unodes, *uself;
		u16 self;

		if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err2;
		}

		entries = min_t(int, scif_info.maxid, node_ids.len);
		nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
		if (entries && !nodes) {
			err = -ENOMEM;
			goto getnodes_err2;
		}
		node_ids.len = scif_get_node_ids(nodes, entries, &self);

		unodes = (void __user *)node_ids.nodes;
		if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		uself = (void __user *)node_ids.self;
		if (copy_to_user(uself, &self, sizeof(u16))) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err1;
		}
getnodes_err1:
		kfree(nodes);
getnodes_err2:
		return err;
	}
	}
	return -EINVAL;
}

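/* File operations backing the SCIF misc character device */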
const struct file_operations scif_fops = {
	.open = scif_fdopen,
	.release = scif_fdclose,
	.unlocked_ioctl = scif_fdioctl,
	.poll = scif_fdpoll,
	.flush = scif_fdflush,
	.owner = THIS_MODULE,
};