/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include "scif_map.h"
| 20 | |
| 21 | void scif_cleanup_ep_qp(struct scif_endpt *ep) |
| 22 | { |
| 23 | struct scif_qp *qp = ep->qp_info.qp; |
| 24 | |
| 25 | if (qp->outbound_q.rb_base) { |
| 26 | scif_iounmap((void *)qp->outbound_q.rb_base, |
| 27 | qp->outbound_q.size, ep->remote_dev); |
| 28 | qp->outbound_q.rb_base = NULL; |
| 29 | } |
| 30 | if (qp->remote_qp) { |
| 31 | scif_iounmap((void *)qp->remote_qp, |
| 32 | sizeof(struct scif_qp), ep->remote_dev); |
| 33 | qp->remote_qp = NULL; |
| 34 | } |
| 35 | if (qp->local_qp) { |
| 36 | scif_unmap_single(qp->local_qp, ep->remote_dev, |
| 37 | sizeof(struct scif_qp)); |
| 38 | qp->local_qp = 0x0; |
| 39 | } |
| 40 | if (qp->local_buf) { |
| 41 | scif_unmap_single(qp->local_buf, ep->remote_dev, |
| 42 | SCIF_ENDPT_QP_SIZE); |
| 43 | qp->local_buf = 0; |
| 44 | } |
| 45 | } |
| 46 | |
| 47 | void scif_teardown_ep(void *endpt) |
| 48 | { |
| 49 | struct scif_endpt *ep = endpt; |
| 50 | struct scif_qp *qp = ep->qp_info.qp; |
| 51 | |
| 52 | if (qp) { |
| 53 | spin_lock(&ep->lock); |
| 54 | scif_cleanup_ep_qp(ep); |
| 55 | spin_unlock(&ep->lock); |
| 56 | kfree(qp->inbound_q.rb_base); |
| 57 | kfree(qp); |
| 58 | } |
| 59 | } |
| 60 | |
| 61 | /* |
| 62 | * Enqueue the endpoint to the zombie list for cleanup. |
| 63 | * The endpoint should not be accessed once this API returns. |
| 64 | */ |
| 65 | void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held) |
| 66 | { |
| 67 | if (!eplock_held) |
| 68 | spin_lock(&scif_info.eplock); |
| 69 | spin_lock(&ep->lock); |
| 70 | ep->state = SCIFEP_ZOMBIE; |
| 71 | spin_unlock(&ep->lock); |
| 72 | list_add_tail(&ep->list, &scif_info.zombie); |
| 73 | scif_info.nr_zombies++; |
| 74 | if (!eplock_held) |
| 75 | spin_unlock(&scif_info.eplock); |
| 76 | schedule_work(&scif_info.misc_work); |
| 77 | } |
| 78 | |
| 79 | void scif_cleanup_zombie_epd(void) |
| 80 | { |
| 81 | struct list_head *pos, *tmpq; |
| 82 | struct scif_endpt *ep; |
| 83 | |
| 84 | spin_lock(&scif_info.eplock); |
| 85 | list_for_each_safe(pos, tmpq, &scif_info.zombie) { |
| 86 | ep = list_entry(pos, struct scif_endpt, list); |
| 87 | list_del(pos); |
| 88 | scif_info.nr_zombies--; |
| 89 | kfree(ep); |
| 90 | } |
| 91 | spin_unlock(&scif_info.eplock); |
| 92 | } |