/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include "scif_map.h"

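/*
 * Release the queue pair mappings for an endpoint: iounmap the remote
 * outbound ring buffer and remote queue pair, and tear down the DMA
 * mappings of the local queue pair and local ring buffer.
 */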
void scif_cleanup_ep_qp(struct scif_endpt *ep)
{
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp->outbound_q.rb_base) {
		scif_iounmap((void *)qp->outbound_q.rb_base,
			     qp->outbound_q.size, ep->remote_dev);
		qp->outbound_q.rb_base = NULL;
	}
	if (qp->remote_qp) {
		scif_iounmap((void *)qp->remote_qp,
			     sizeof(struct scif_qp), ep->remote_dev);
		qp->remote_qp = NULL;
	}
	if (qp->local_qp) {
		scif_unmap_single(qp->local_qp, ep->remote_dev,
				  sizeof(struct scif_qp));
		qp->local_qp = 0x0;
	}
	if (qp->local_buf) {
		scif_unmap_single(qp->local_buf, ep->remote_dev,
				  SCIF_ENDPT_QP_SIZE);
		qp->local_buf = 0;
	}
}

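/*
 * Tear down an endpoint's queue pair: drop all mappings under the endpoint
 * lock, then free the locally allocated inbound ring buffer and the queue
 * pair structure itself.
 */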
void scif_teardown_ep(void *endpt)
{
	struct scif_endpt *ep = endpt;
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp) {
		spin_lock(&ep->lock);
		scif_cleanup_ep_qp(ep);
		spin_unlock(&ep->lock);
		kfree(qp->inbound_q.rb_base);
		kfree(qp);
	}
}

/*
 * Enqueue the endpoint to the zombie list for cleanup.
 * The endpoint should not be accessed once this API returns.
 */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
	if (!eplock_held)
		spin_lock(&scif_info.eplock);
	spin_lock(&ep->lock);
	ep->state = SCIFEP_ZOMBIE;
	spin_unlock(&ep->lock);
	list_add_tail(&ep->list, &scif_info.zombie);
	scif_info.nr_zombies++;
	if (!eplock_held)
		spin_unlock(&scif_info.eplock);
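	/* Kick deferred cleanup of the zombie list; see scif_cleanup_zombie_epd() */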
	schedule_work(&scif_info.misc_work);
}

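/*
 * Find the endpoint listening on @port. On success the endpoint is returned
 * with ep->lock held and the caller is responsible for releasing it;
 * returns NULL if no endpoint is listening on the port.
 */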
static struct scif_endpt *scif_find_listen_ep(u16 port)
{
	struct scif_endpt *ep = NULL;
	struct list_head *pos, *tmpq;

	spin_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.listen) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->port.port == port) {
			spin_lock(&ep->lock);
			spin_unlock(&scif_info.eplock);
			return ep;
		}
	}
	spin_unlock(&scif_info.eplock);
	return NULL;
}

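/*
 * Free every endpoint queued on the zombie list by
 * scif_add_epd_to_zombie_list().
 */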
void scif_cleanup_zombie_epd(void)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;

	spin_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.zombie) {
		ep = list_entry(pos, struct scif_endpt, list);
		list_del(pos);
		scif_info.nr_zombies--;
		kfree(ep);
	}
	spin_unlock(&scif_info.eplock);
}

/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @msg: Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node. This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end point's queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_conreq *conreq;

	conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
	if (!conreq)
		/* Lack of resources so reject the request. */
		goto conreq_sendrej;

	ep = scif_find_listen_ep(msg->dst.port);
	if (!ep)
		/* Send reject due to no listening ports */
		goto conreq_sendrej_free;

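	/* ep->lock is held here; it was taken by scif_find_listen_ep() */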
	if (ep->backlog <= ep->conreqcnt) {
		/* Send reject due to too many pending requests */
		spin_unlock(&ep->lock);
		goto conreq_sendrej_free;
	}

	conreq->msg = *msg;
	list_add_tail(&conreq->list, &ep->conlist);
	ep->conreqcnt++;
	wake_up_interruptible(&ep->conwq);
	spin_unlock(&ep->lock);
	return;

conreq_sendrej_free:
	kfree(conreq);
conreq_sendrej:
	msg->uop = SCIF_CNCT_REJ;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
 * @msg: Interrupt message
 *
 * An accept() on the remote node has occurred and sent this message
 * to indicate success. Place the end point in the MAPPING state and
 * save the remote node's memory information. Then wake up the connect
 * request so it can finish.
 */
void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTING == ep->state) {
		ep->peer.node = msg->src.node;
		ep->peer.port = msg->src.port;
		ep->qp_info.gnt_pld = msg->payload[1];
		ep->remote_ep = msg->payload[2];
		ep->state = SCIFEP_MAPPING;

		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
 * @msg: Interrupt message
 *
 * The remote connection request has finished mapping the local memory.
 * Place the connection in the connected state and wake up the pending
 * accept() call.
 */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

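	/* Take connlock before ep->lock to add the endpoint to the global connected list */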
	mutex_lock(&scif_info.connlock);
	spin_lock(&ep->lock);
	/* New ep is now connected with all resources set. */
	ep->state = SCIFEP_CONNECTED;
	list_add_tail(&ep->list, &scif_info.connected);
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
}

/**
 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
 * @msg: Interrupt message
 *
 * The remote connection request failed to map the local memory it was sent.
 * Place the end point in the CLOSING state to indicate the failure and wake
 * up the pending accept().
 */
void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_CLOSING;
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
 * @msg: Interrupt message
 *
 * The remote end has rejected the connection request. Set the end
 * point back to the bound state and wake up the pending connect().
 */
void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTING == ep->state) {
		ep->state = SCIFEP_BOUND;
		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
 * @msg: Interrupt message
 *
 * The remote node has indicated close() has been called on its end
 * point. Remove the local end point from the connected list, set its
 * state to disconnected and ensure accesses to the remote node are
 * shut down.
 *
 * When all accesses to the remote end have completed, send a
 * DISCNT_ACK to indicate it can remove its resources and complete
 * the close routine.
 */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;

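	/*
	 * payload[1] carries the local endpoint pointer and payload[0] the
	 * peer's endpoint pointer; both must match to identify the connection.
	 */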
	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		/*
		 * The local ep may have sent a disconnect and been closed
		 * due to a message response timeout. It may have been
		 * allocated again and formed a new connection so we want to
		 * check if the remote ep matches.
		 */
		if (((u64)tmpep == msg->payload[1]) &&
		    ((u64)tmpep->remote_ep == msg->payload[0])) {
			list_del(pos);
			ep = tmpep;
			spin_lock(&ep->lock);
			break;
		}
	}

	/*
	 * If the terminated end is not found then this side started closing
	 * before the other side sent the disconnect. If so the ep will no
	 * longer be on the connected list. Regardless, the other side
	 * needs to be acked to let it know close is complete.
	 */
	if (!ep) {
		mutex_unlock(&scif_info.connlock);
		goto discnct_ack;
	}

	ep->state = SCIFEP_DISCONNECTED;
	list_add_tail(&ep->list, &scif_info.disconnected);

	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);

discnct_ack:
	msg->uop = SCIF_DISCNT_ACK;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
 * @msg: Interrupt message
 *
 * The remote side has indicated it has no more references to local resources.
 */
void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_DISCONNECTED;
	spin_unlock(&ep->lock);
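	/* Signal the local close path that the disconnect handshake has completed */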
	complete(&ep->discon);
}

/**
 * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
 * @msg: Interrupt message
 *
 * The remote side has sent new data; wake up any thread blocked waiting
 * to receive on this end point.
 */
void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTED == ep->state)
		wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
 * @msg: Interrupt message
 *
 * The remote side has consumed data; wake up any thread blocked waiting
 * to send on this end point.
 */
void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTED == ep->state)
		wake_up_interruptible(&ep->sendwq);
	spin_unlock(&ep->lock);
}