Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2015 HGST, a Western Digital Company. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. |
| 12 | */ |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/err.h> |
| 15 | #include <linux/slab.h> |
| 16 | #include <rdma/ib_verbs.h> |
| 17 | |
/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
/* smaller batch for ib_process_cq_direct(), which keeps the WCs on the stack */
#define IB_POLL_BATCH_DIRECT 8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ 256
#define IB_POLL_BUDGET_WORKQUEUE 65536

/* re-arm for the next completion and learn whether events were missed meanwhile */
#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
| 28 | |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 29 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, |
| 30 | int batch) |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 31 | { |
| 32 | int i, n, completed = 0; |
| 33 | |
Sagi Grimberg | fedd9e1 | 2017-03-16 18:57:00 +0200 | [diff] [blame] | 34 | /* |
| 35 | * budget might be (-1) if the caller does not |
| 36 | * want to bound this call, thus we need unsigned |
| 37 | * minimum here. |
| 38 | */ |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 39 | while ((n = ib_poll_cq(cq, min_t(u32, batch, |
| 40 | budget - completed), wcs)) > 0) { |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 41 | for (i = 0; i < n; i++) { |
Sagi Grimberg | 246d8b1 | 2018-01-14 17:07:50 +0200 | [diff] [blame] | 42 | struct ib_wc *wc = &wcs[i]; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 43 | |
| 44 | if (wc->wr_cqe) |
| 45 | wc->wr_cqe->done(cq, wc); |
| 46 | else |
| 47 | WARN_ON_ONCE(wc->status == IB_WC_SUCCESS); |
| 48 | } |
| 49 | |
| 50 | completed += n; |
| 51 | |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 52 | if (n != batch || (budget != -1 && completed >= budget)) |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 53 | break; |
| 54 | } |
| 55 | |
| 56 | return completed; |
| 57 | } |
| 58 | |
| 59 | /** |
| 60 | * ib_process_direct_cq - process a CQ in caller context |
| 61 | * @cq: CQ to process |
| 62 | * @budget: number of CQEs to poll for |
| 63 | * |
Sagi Grimberg | 246d8b1 | 2018-01-14 17:07:50 +0200 | [diff] [blame] | 64 | * This function is used to process all outstanding CQ entries. |
| 65 | * It does not offload CQ processing to a different context and does |
| 66 | * not ask for completion interrupts from the HCA. |
| 67 | * Using direct processing on CQ with non IB_POLL_DIRECT type may trigger |
| 68 | * concurrent processing. |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 69 | * |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 70 | * Note: do not pass -1 as %budget unless it is guaranteed that the number |
| 71 | * of completions that will be processed is small. |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 72 | */ |
| 73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) |
| 74 | { |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 75 | struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 76 | |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 77 | return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 78 | } |
| 79 | EXPORT_SYMBOL(ib_process_cq_direct); |
| 80 | |
/*
 * Completion handler for IB_POLL_DIRECT CQs.  Direct CQs are polled by the
 * ULP and never have notifications requested, so any callback here means
 * something asked the HCA for an interrupt it should not have.
 */
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}
| 85 | |
| 86 | static int ib_poll_handler(struct irq_poll *iop, int budget) |
| 87 | { |
| 88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); |
| 89 | int completed; |
| 90 | |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 91 | completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 92 | if (completed < budget) { |
| 93 | irq_poll_complete(&cq->iop); |
| 94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
| 95 | irq_poll_sched(&cq->iop); |
| 96 | } |
| 97 | |
| 98 | return completed; |
| 99 | } |
| 100 | |
/* Completion interrupt for IB_POLL_SOFTIRQ CQs: kick the irq_poll loop. */
static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}
| 105 | |
| 106 | static void ib_cq_poll_work(struct work_struct *work) |
| 107 | { |
| 108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); |
| 109 | int completed; |
| 110 | |
Max Gurtovoy | d3b9e8a | 2018-03-05 20:09:48 +0200 | [diff] [blame] | 111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, |
| 112 | IB_POLL_BATCH); |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 113 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || |
| 114 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
| 115 | queue_work(ib_comp_wq, &cq->work); |
| 116 | } |
| 117 | |
/* Completion interrupt for IB_POLL_WORKQUEUE CQs: queue the poll work item. */
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(ib_comp_wq, &cq->work);
}
| 122 | |
/**
 * __ib_alloc_cq - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vectors for this CQ
 * @poll_ctx: context to poll the CQ from.
 * @caller: module owner name.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 *
 * Returns the new CQ on success, or an ERR_PTR on failure.
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe = nr_cqe,
		.comp_vector = comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
	if (IS_ERR(cq))
		return cq;

	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	/* scratch WC array shared by the softirq and workqueue poll paths */
	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	/* register with the restrack resource tracker before going live */
	cq->res.type = RDMA_RESTRACK_CQ;
	cq->res.kern_name = caller;
	rdma_restrack_add(&cq->res);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		/* ULP polls; no notification is ever requested */
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

	/* unwind in reverse order of setup */
out_free_wc:
	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
out_destroy_cq:
	cq->device->destroy_cq(cq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq);
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 197 | |
/**
 * ib_free_cq - free a completion queue
 * @cq: completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	/* refuse to free a CQ that still has users; leak is safer than UAF */
	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	/* quiesce the polling context before tearing anything down */
	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
		/* wait for in-flight poll work so it cannot touch cq->wc below */
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
	ret = cq->device->destroy_cq(cq);
	WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq);