/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH			16

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ		256
#define IB_POLL_BUDGET_WORKQUEUE	65536

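/*
 * CQ re-arm flags used below: IB_CQ_NEXT_COMP requests an interrupt for
 * the next completion, and IB_CQ_REPORT_MISSED_EVENTS makes
 * ib_req_notify_cq() return a positive value if completions arrived while
 * the CQ was not armed, in which case the CQ must be polled again.
 */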
#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
{
	int i, n, completed = 0;
	struct ib_wc *wcs = poll_wc ? : cq->wc;

	/*
	 * budget may be (-1) if the caller does not want to bound this
	 * call; the unsigned min_t() then makes (budget - completed)
	 * compare as a huge value, so we always poll a full batch.
	 */
	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
			budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		if (n != IB_POLL_BATCH ||
		    (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq: CQ to process
 * @budget: number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ with a poll context other than
 * IB_POLL_DIRECT may trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH];

	return __ib_process_cq(cq, budget, wcs);
}
EXPORT_SYMBOL(ib_process_cq_direct);

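/*
 * A minimal usage sketch (not part of this file): a ULP that allocated
 * its CQ with IB_POLL_DIRECT might drain completions from its own
 * context like this; ulp_drain_cq() and the budget of 64 are
 * hypothetical.
 *
 *	static void ulp_drain_cq(struct ib_cq *cq)
 *	{
 *		while (ib_process_cq_direct(cq, 64) > 0)
 *			cpu_relax();
 *	}
 */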
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	int completed;

	completed = __ib_process_cq(cq, budget, NULL);
	if (completed < budget) {
		/* The CQ is drained; re-arm it, and poll again if we raced. */
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
			irq_poll_sched(&cq->iop);
	}

	return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
	/* Requeue if the budget was exhausted or completions were missed. */
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(ib_comp_wq, &cq->work);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(ib_comp_wq, &cq->work);
}

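/*
 * The three completion handlers above match the three poll contexts:
 * IB_POLL_DIRECT leaves all polling to the caller, IB_POLL_SOFTIRQ
 * defers to irq_poll in softirq context, and IB_POLL_WORKQUEUE defers
 * to a work item running on ib_comp_wq.
 */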
/**
 * ib_alloc_cq - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vector for this CQ
 * @poll_ctx: context to poll the CQ from
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
		int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
	if (IS_ERR(cq))
		return cq;

	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

out_free_wc:
	kfree(cq->wc);
out_destroy_cq:
	cq->device->destroy_cq(cq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_cq);

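/*
 * A minimal usage sketch (not part of this file): a ULP allocating a
 * workqueue-polled CQ and freeing it on teardown.  The device pointer,
 * private context, CQE count, and completion vector are hypothetical.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(dev, ulp_ctx, 128, 0, IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...  (post WRs with wr->wr_cqe set; done callbacks run from the wq)
 *	ib_free_cq(cq);
 */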
/**
 * ib_free_cq - free a completion queue
 * @cq: completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	ret = cq->device->destroy_cq(cq);
	WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq);