/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
18#include "scif_main.h"
19
20static int scif_fdopen(struct inode *inode, struct file *f)
21{
22 struct scif_endpt *priv = scif_open();
23
24 if (!priv)
25 return -ENOMEM;
26 f->private_data = priv;
27 return 0;
28}
29
30static int scif_fdclose(struct inode *inode, struct file *f)
31{
32 struct scif_endpt *priv = f->private_data;
33
34 return scif_close(priv);
35}
36
37static int scif_fdflush(struct file *f, fl_owner_t id)
38{
39 struct scif_endpt *ep = f->private_data;
40
41 spin_lock(&ep->lock);
42 /*
43 * The listening endpoint stashes the open file information before
44 * waiting for incoming connections. The release callback would never be
45 * called if the application closed the endpoint, while waiting for
46 * incoming connections from a separate thread since the file descriptor
47 * reference count is bumped up in the accept IOCTL. Call the flush
48 * routine if the id matches the endpoint open file information so that
49 * the listening endpoint can be woken up and the fd released.
50 */
51 if (ep->files == id)
52 __scif_flush(ep);
53 spin_unlock(&ep->lock);
54 return 0;
55}
56
57static __always_inline void scif_err_debug(int err, const char *str)
58{
59 /*
60 * ENOTCONN is a common uninteresting error which is
61 * flooding debug messages to the console unnecessarily.
62 */
63 if (err < 0 && err != -ENOTCONN)
64 dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
65}
66
67static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
68{
69 struct scif_endpt *priv = f->private_data;
70 void __user *argp = (void __user *)arg;
Nikhil Rao76371c72015-04-29 05:32:36 -070071 int err = 0;
Sudeep Dutte9089f42015-04-29 05:32:35 -070072 bool non_block = false;
73
74 non_block = !!(f->f_flags & O_NONBLOCK);
75
76 switch (cmd) {
77 case SCIF_BIND:
78 {
79 int pn;
80
81 if (copy_from_user(&pn, argp, sizeof(pn)))
82 return -EFAULT;
83
84 pn = scif_bind(priv, pn);
85 if (pn < 0)
86 return pn;
87
88 if (copy_to_user(argp, &pn, sizeof(pn)))
89 return -EFAULT;
90
91 return 0;
92 }
93 case SCIF_LISTEN:
94 return scif_listen(priv, arg);
Nikhil Rao76371c72015-04-29 05:32:36 -070095 case SCIF_CONNECT:
96 {
97 struct scifioctl_connect req;
98 struct scif_endpt *ep = (struct scif_endpt *)priv;
99
100 if (copy_from_user(&req, argp, sizeof(req)))
101 return -EFAULT;
102
103 err = __scif_connect(priv, &req.peer, non_block);
104 if (err < 0)
105 return err;
106
107 req.self.node = ep->port.node;
108 req.self.port = ep->port.port;
109
110 if (copy_to_user(argp, &req, sizeof(req)))
111 return -EFAULT;
112
113 return 0;
114 }
115 /*
116 * Accept is done in two halves. The request ioctl does the basic
117 * functionality of accepting the request and returning the information
118 * about it including the internal ID of the end point. The register
119 * is done with the internal ID on a new file descriptor opened by the
120 * requesting process.
121 */
122 case SCIF_ACCEPTREQ:
123 {
124 struct scifioctl_accept request;
125 scif_epd_t *ep = (scif_epd_t *)&request.endpt;
126
127 if (copy_from_user(&request, argp, sizeof(request)))
128 return -EFAULT;
129
130 err = scif_accept(priv, &request.peer, ep, request.flags);
131 if (err < 0)
132 return err;
133
134 if (copy_to_user(argp, &request, sizeof(request))) {
135 scif_close(*ep);
136 return -EFAULT;
137 }
138 /*
139 * Add to the list of user mode eps where the second half
140 * of the accept is not yet completed.
141 */
142 spin_lock(&scif_info.eplock);
143 list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
144 list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
145 (*ep)->listenep = priv;
146 priv->acceptcnt++;
147 spin_unlock(&scif_info.eplock);
148
149 return 0;
150 }
151 case SCIF_ACCEPTREG:
152 {
153 struct scif_endpt *priv = f->private_data;
154 struct scif_endpt *newep;
155 struct scif_endpt *lisep;
156 struct scif_endpt *fep = NULL;
157 struct scif_endpt *tmpep;
158 struct list_head *pos, *tmpq;
159
160 /* Finally replace the pointer to the accepted endpoint */
161 if (copy_from_user(&newep, argp, sizeof(void *)))
162 return -EFAULT;
163
164 /* Remove form the user accept queue */
165 spin_lock(&scif_info.eplock);
166 list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
167 tmpep = list_entry(pos,
168 struct scif_endpt, miacceptlist);
169 if (tmpep == newep) {
170 list_del(pos);
171 fep = tmpep;
172 break;
173 }
174 }
175
176 if (!fep) {
177 spin_unlock(&scif_info.eplock);
178 return -ENOENT;
179 }
180
181 lisep = newep->listenep;
182 list_for_each_safe(pos, tmpq, &lisep->li_accept) {
183 tmpep = list_entry(pos,
184 struct scif_endpt, liacceptlist);
185 if (tmpep == newep) {
186 list_del(pos);
187 lisep->acceptcnt--;
188 break;
189 }
190 }
191
192 spin_unlock(&scif_info.eplock);
193
194 /* Free the resources automatically created from the open. */
195 scif_teardown_ep(priv);
196 scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
197 f->private_data = newep;
198 return 0;
199 }
Sudeep Dutte9089f42015-04-29 05:32:35 -0700200 }
201 return -EINVAL;
202}
203
204const struct file_operations scif_fops = {
205 .open = scif_fdopen,
206 .release = scif_fdclose,
207 .unlocked_ioctl = scif_fdioctl,
208 .flush = scif_fdflush,
209 .owner = THIS_MODULE,
210};