/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"

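/*
 * scif_fdopen() - open() handler for the SCIF device node.
 *
 * Allocates a new SCIF endpoint via scif_open() and stashes it in the
 * file's private_data so the other file operations can retrieve it.
 */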
static int scif_fdopen(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = scif_open();

	if (!priv)
		return -ENOMEM;
	f->private_data = priv;
	return 0;
}

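/*
 * scif_fdclose() - release() handler, called when the last reference to
 * the file goes away. Tears down the endpoint stored in private_data.
 */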
static int scif_fdclose(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = f->private_data;

	return scif_close(priv);
}

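/*
 * scif_fdflush() - flush() handler. Unlike ->release, ->flush runs on
 * every close() of a descriptor, with id identifying the closing
 * process's file table; that is what makes the wakeup below possible.
 */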
static int scif_fdflush(struct file *f, fl_owner_t id)
{
	struct scif_endpt *ep = f->private_data;

	spin_lock(&ep->lock);
	/*
	 * The listening endpoint stashes the open file information before
	 * waiting for incoming connections. The release callback would never
	 * be called if the application closed the endpoint while a separate
	 * thread was waiting for incoming connections, since the accept ioctl
	 * bumps up the file descriptor reference count. Call the flush routine
	 * if the id matches the endpoint's open file information so that the
	 * listening endpoint can be woken up and the fd released.
	 */
	if (ep->files == id)
		__scif_flush(ep);
	spin_unlock(&ep->lock);
	return 0;
}

static __always_inline void scif_err_debug(int err, const char *str)
{
	/*
	 * ENOTCONN is a common, uninteresting error which would otherwise
	 * flood the console with debug messages unnecessarily.
	 */
	if (err < 0 && err != -ENOTCONN)
		dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
}

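/*
 * scif_fdioctl() - dispatcher for the SCIF ioctl interface. O_NONBLOCK
 * on the file selects non-blocking behavior for connection setup.
 */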
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct scif_endpt *priv = f->private_data;
	void __user *argp = (void __user *)arg;
	int err = 0;
	struct scifioctl_msg request;
	bool non_block = false;

	non_block = !!(f->f_flags & O_NONBLOCK);

	switch (cmd) {
	case SCIF_BIND:
	{
		int pn;

		if (copy_from_user(&pn, argp, sizeof(pn)))
			return -EFAULT;

		pn = scif_bind(priv, pn);
		if (pn < 0)
			return pn;

		if (copy_to_user(argp, &pn, sizeof(pn)))
			return -EFAULT;

		return 0;
	}
	case SCIF_LISTEN:
		return scif_listen(priv, arg);
	case SCIF_CONNECT:
	{
		struct scifioctl_connect req;
		struct scif_endpt *ep = (struct scif_endpt *)priv;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;

		err = __scif_connect(priv, &req.peer, non_block);
		if (err < 0)
			return err;

		req.self.node = ep->port.node;
		req.self.port = ep->port.port;

		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;

		return 0;
	}
	/*
	 * Accept is done in two halves. The request ioctl does the basic
	 * work of accepting the connection and returning information about
	 * it, including the internal ID of the endpoint. The register half
	 * is then done with that internal ID on a new file descriptor opened
	 * by the requesting process. A sketch of the user-space sequence
	 * follows.
	 */
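	/*
	 * A minimal user-space sketch of the two halves, assuming the
	 * scifioctl_accept layout from the uapi header, a /dev/scif device
	 * node and the SCIF_ACCEPT_SYNC flag; error handling elided:
	 *
	 *	struct scifioctl_accept req = { .flags = SCIF_ACCEPT_SYNC };
	 *
	 *	ioctl(listen_fd, SCIF_ACCEPTREQ, &req);    // first half
	 *	int new_fd = open("/dev/scif", O_RDWR);    // fresh fd
	 *	ioctl(new_fd, SCIF_ACCEPTREG, &req.endpt); // second half
	 */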
	case SCIF_ACCEPTREQ:
	{
		struct scifioctl_accept request;
		scif_epd_t *ep = (scif_epd_t *)&request.endpt;

		if (copy_from_user(&request, argp, sizeof(request)))
			return -EFAULT;

		err = scif_accept(priv, &request.peer, ep, request.flags);
		if (err < 0)
			return err;

		if (copy_to_user(argp, &request, sizeof(request))) {
			scif_close(*ep);
			return -EFAULT;
		}
		/*
		 * Add to the list of user mode eps where the second half
		 * of the accept is not yet completed.
		 */
		spin_lock(&scif_info.eplock);
		list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
		list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
		(*ep)->listenep = priv;
		priv->acceptcnt++;
		spin_unlock(&scif_info.eplock);

		return 0;
	}
	case SCIF_ACCEPTREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scif_endpt *newep;
		struct scif_endpt *lisep;
		struct scif_endpt *fep = NULL;
		struct scif_endpt *tmpep;
		struct list_head *pos, *tmpq;

		/* Fetch the pointer to the accepted endpoint */
		if (copy_from_user(&newep, argp, sizeof(void *)))
			return -EFAULT;

		/* Remove from the user accept queue */
		spin_lock(&scif_info.eplock);
		list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, miacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				fep = tmpep;
				break;
			}
		}

		if (!fep) {
			spin_unlock(&scif_info.eplock);
			return -ENOENT;
		}

		lisep = newep->listenep;
		list_for_each_safe(pos, tmpq, &lisep->li_accept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, liacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				lisep->acceptcnt--;
				break;
			}
		}

		spin_unlock(&scif_info.eplock);

		/* Free the resources automatically created from the open. */
		scif_teardown_ep(priv);
		scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
		/* Finally replace the fd's endpoint with the accepted one */
		f->private_data = newep;
		return 0;
	}
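	/*
	 * SCIF_SEND and SCIF_RECV mirror each other: copy in the message
	 * descriptor, move the payload, then report the number of bytes
	 * transferred back through the descriptor's out_len field.
	 */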
	case SCIF_SEND:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto send_err;
		}
		err = scif_user_send(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto send_err;
		if (copy_to_user(&((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto send_err;
		}
		err = 0;
send_err:
		scif_err_debug(err, "scif_send");
		return err;
	}
	case SCIF_RECV:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto recv_err;
		}

		err = scif_user_recv(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto recv_err;

		if (copy_to_user(&((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = 0;
recv_err:
		scif_err_debug(err, "scif_recv");
		return err;
	}
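	/*
	 * SCIF_GET_NODEIDS reports the IDs of the online SCIF nodes and the
	 * ID of this node. The number of entries copied back is capped by
	 * both the caller-supplied buffer length and scif_info.maxid.
	 */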
	case SCIF_GET_NODEIDS:
	{
		struct scifioctl_node_ids node_ids;
		int entries;
		u16 *nodes;
		void __user *unodes, *uself;
		u16 self;

		if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err2;
		}

		entries = min_t(int, scif_info.maxid, node_ids.len);
		nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
		if (entries && !nodes) {
			err = -ENOMEM;
			goto getnodes_err2;
		}
		node_ids.len = scif_get_node_ids(nodes, entries, &self);

		unodes = (void __user *)node_ids.nodes;
		if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		uself = (void __user *)node_ids.self;
		if (copy_to_user(uself, &self, sizeof(u16))) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err1;
		}
getnodes_err1:
		kfree(nodes);
getnodes_err2:
		return err;
	}
	}
	return -EINVAL;
}

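/*
 * File operations for the SCIF character device; the rest of the driver
 * registers these against the misc device referenced via scif_info.mdev.
 */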
const struct file_operations scif_fops = {
	.open = scif_fdopen,
	.release = scif_fdclose,
	.unlocked_ioctl = scif_fdioctl,
	.flush = scif_fdflush,
	.owner = THIS_MODULE,
};