blob: 17dd6ab193fa79403ce6e5f46b8fd66810a454bd [file] [log] [blame]
Dennis Dalessandrob518d3e2016-01-06 09:56:15 -08001/*
2 * Copyright(c) 2015 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
Dennis Dalessandro0acb0cc2016-01-06 10:04:46 -080048#include <linux/bitops.h>
49#include <linux/lockdep.h>
50#include "vt.h"
Dennis Dalessandrob518d3e2016-01-06 09:56:15 -080051#include "qp.h"
52
Dennis Dalessandro0acb0cc2016-01-06 10:04:46 -080053static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
54{
55 unsigned long page = get_zeroed_page(GFP_KERNEL);
56
57 /*
58 * Free the page if someone raced with us installing it.
59 */
60
61 spin_lock(&qpt->lock);
62 if (map->page)
63 free_page(page);
64 else
65 map->page = (void *)page;
66 spin_unlock(&qpt->lock);
67}
68
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 *
 * Pre-marks the driver-reserved QPN range [qpn_res_start, qpn_res_end)
 * as in-use in the bitmap so verbs allocation never hands those QPNs out.
 *
 * Returns 0 on success, -EINVAL for a bad reserved range, or -ENOMEM if
 * a bitmap page cannot be allocated.
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	/* The reserved range must be non-empty */
	if (!(rdi->dparms.qpn_res_end > rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	/* Allocation scan starts at the driver's first usable QPN */
	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs let them use
	 * our qpn table. No need for two. Lets go ahead and mark the bitmaps
	 * for those. The reserved range must be *after* the range which verbs
	 * will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i < rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			/* Bitmap pages are allocated lazily, one at a time */
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		/* Mark this QPN as permanently in use */
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
124
125/**
126 * free_qpn_table - free the QP number table for a device
127 * @qpt: the QPN table
128 */
129static void free_qpn_table(struct rvt_qpn_table *qpt)
130{
131 int i;
132
133 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
134 free_page((unsigned long)qpt->map[i].page);
135}
136
137int rvt_driver_qp_init(struct rvt_dev_info *rdi)
138{
139 int i;
140 int ret = -ENOMEM;
141
142 if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
143 rvt_pr_info(rdi, "Driver is doing QP init.\n");
144 return 0;
145 }
146
147 if (!rdi->dparms.qp_table_size)
148 return -EINVAL;
149
150 /*
151 * If driver is not doing any QP allocation then make sure it is
152 * providing the necessary QP functions.
153 */
154 if (!rdi->driver_f.free_all_qps)
155 return -EINVAL;
156
157 /* allocate parent object */
158 rdi->qp_dev = kzalloc(sizeof(*rdi->qp_dev), GFP_KERNEL);
159 if (!rdi->qp_dev)
160 return -ENOMEM;
161
162 /* allocate hash table */
163 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
164 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
165 rdi->qp_dev->qp_table =
166 kmalloc(rdi->qp_dev->qp_table_size *
167 sizeof(*rdi->qp_dev->qp_table),
168 GFP_KERNEL);
169 if (!rdi->qp_dev->qp_table)
170 goto no_qp_table;
171
172 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
173 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
174
175 spin_lock_init(&rdi->qp_dev->qpt_lock);
176
177 /* initialize qpn map */
178 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
179 goto fail_table;
180
181 return ret;
182
183fail_table:
184 kfree(rdi->qp_dev->qp_table);
185 free_qpn_table(&rdi->qp_dev->qpn_table);
186
187no_qp_table:
188 kfree(rdi->qp_dev);
189
190 return ret;
191}
192
193/**
194 * free_all_qps - check for QPs still in use
195 * @qpt: the QP table to empty
196 *
197 * There should not be any QPs still in use.
198 * Free memory for table.
199 */
200static unsigned free_all_qps(struct rvt_dev_info *rdi)
201{
202 unsigned long flags;
203 struct rvt_qp *qp;
204 unsigned n, qp_inuse = 0;
205 spinlock_t *ql; /* work around too long line below */
206
207 rdi->driver_f.free_all_qps(rdi);
208
209 if (!rdi->qp_dev)
210 return 0;
211
212 ql = &rdi->qp_dev->qpt_lock;
213 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
214 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
215 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
216 lockdep_is_held(ql));
217 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
218 qp = rcu_dereference_protected(qp->next,
219 lockdep_is_held(ql));
220 while (qp) {
221 qp_inuse++;
222 qp = rcu_dereference_protected(qp->next,
223 lockdep_is_held(ql));
224 }
225 }
226 spin_unlock_irqrestore(ql, flags);
227 synchronize_rcu();
228 return qp_inuse;
229}
230
231void rvt_qp_exit(struct rvt_dev_info *rdi)
232{
233 u32 qps_inuse = free_all_qps(rdi);
234
235 qps_inuse = free_all_qps(rdi);
236 if (qps_inuse)
237 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
238 qps_inuse);
239 if (!rdi->qp_dev)
240 return;
241
242 kfree(rdi->qp_dev->qp_table);
243 free_qpn_table(&rdi->qp_dev->qpn_table);
244 kfree(rdi->qp_dev);
245}
246
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain who's device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 *
 * NOTE(review): placeholder — always returns ERR_PTR(-EOPNOTSUPP) until
 * the driver API below is implemented.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	/*
	 * Queue pair creation is mostly an rvt issue. However, drivers have
	 * their own unique idea of what queue pare numbers mean. For instance
	 * there is a reserved range for PSM.
	 *
	 * VI-DRIVER-API: make_qpn()
	 * Returns a valid QPN for verbs to use
	 */
	return ERR_PTR(-EOPNOTSUPP);
}
271
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 *
 * NOTE(review): placeholder — always returns -EOPNOTSUPP for now.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	/*
	 * VT-DRIVER-API: qp_mtu()
	 * OPA devices have a per VL MTU the driver has a mapping of IB SL to SC
	 * to VL and the mapping table of MTUs per VL. This is not something
	 * that IB has and should not live in the rvt.
	 */
	return -EOPNOTSUPP;
}
292
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * NOTE(review): placeholder — always returns -EOPNOTSUPP for now.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	/*
	 * VT-DRIVER-API: qp_flush()
	 * Driver provides a mechanism to flush and wait for that flush to
	 * finish.
	 */

	return -EOPNOTSUPP;
}
312
/**
 * rvt_query_qp - query the attributes of a queue pair
 * @ibqp: the queue pair to query
 * @attr: filled in with the queue pair's attributes
 * @attr_mask: the mask of attributes the caller wants
 * @init_attr: filled in with the queue pair's creation attributes
 *
 * NOTE(review): placeholder — always returns -EOPNOTSUPP for now.
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	return -EOPNOTSUPP;
}
Dennis Dalessandro8cf40202016-01-06 10:01:17 -0800318
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * NOTE(review): placeholder — always returns -EOPNOTSUPP for now.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	/*
	 * When a packet arrives the driver needs to call up to rvt to process
	 * the packet. The UD, RC, UC processing will be done in rvt, however
	 * the driver should be able to override this if it so chooses.
	 * Perhaps a set of function pointers set up at registration time.
	 */

	return -EOPNOTSUPP;
}
339
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * NOTE(review): placeholder — always returns -EOPNOTSUPP for now.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	/*
	 * VT-DRIVER-API: do_send()
	 * Driver needs to have a do_send() call which is a single entry point
	 * to take an already formed packet and throw it out on the wire. Once
	 * the packet is sent the driver needs to make an upcall to rvt so the
	 * completion queue can be notified and/or any other outstanding
	 * work/book keeping can be finished.
	 *
	 * Note that there should also be a way for rvt to protect itself
	 * against hangs in the driver layer. If a send doesn't actually
	 * complete in a timely manner rvt needs to return an error event.
	 */

	return -EOPNOTSUPP;
}
366
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * NOTE(review): placeholder — always returns -EOPNOTSUPP for now.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	return -EOPNOTSUPP;
}