/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx5_ib.h"

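/* Emulated GSI (QP1) state: rx_qp is the single hardware GSI QP created for
 * the port, while tx_qps holds one UD QP per P_Key index for transmission
 * when the device can override the DETH source QPN (see
 * mlx5_ib_deth_sqpn_cap() below).
 */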
struct mlx5_ib_gsi_qp {
	struct ib_qp ibqp;
	struct ib_qp *rx_qp;
	u8 port_num;
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	/* Serialize qp state modifications */
	struct mutex mutex;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp().
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
{
	return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
}

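/* True when the HCA can override the DETH source QPN of a UD QP
 * (set_deth_sqpn capability); the per-P_Key TX QPs rely on this to send
 * with a source QPN of 1.
 */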
static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
}

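/* Create the software GSI QP for a port: allocate the wrapper and the tx_qps
 * array (one slot per P_Key index when the DETH SQPN capability is present),
 * register it in dev->devr.ports[], and create the underlying hardware GSI
 * QP (MLX5_IB_QPT_HW_GSI) that backs it.
 */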
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_gsi_qp *gsi;
	struct ib_qp_init_attr hw_init_attr = *init_attr;
	const u8 port_num = init_attr->port_num;
	const int num_pkeys = pd->device->attrs.max_pkeys;
	const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
	int ret;

	mlx5_ib_dbg(dev, "creating GSI QP\n");

	if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
		mlx5_ib_warn(dev,
			     "invalid port number %d during GSI QP creation\n",
			     port_num);
		return ERR_PTR(-EINVAL);
	}

	gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
	if (!gsi)
		return ERR_PTR(-ENOMEM);

	gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
	if (!gsi->tx_qps) {
		ret = -ENOMEM;
		goto err_free;
	}

	mutex_init(&gsi->mutex);

	mutex_lock(&dev->devr.mutex);

	if (dev->devr.ports[port_num - 1].gsi) {
		mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
			     port_num);
		ret = -EBUSY;
		goto err_free_tx;
	}
	gsi->num_qps = num_qps;
	spin_lock_init(&gsi->lock);

	gsi->cap = init_attr->cap;
	gsi->sq_sig_type = init_attr->sq_sig_type;
	gsi->ibqp.qp_num = 1;
	gsi->port_num = port_num;

	hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
	gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
	if (IS_ERR(gsi->rx_qp)) {
		mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
			     PTR_ERR(gsi->rx_qp));
		ret = PTR_ERR(gsi->rx_qp);
		goto err_free_tx;
	}

	dev->devr.ports[init_attr->port_num - 1].gsi = gsi;

	mutex_unlock(&dev->devr.mutex);

	return &gsi->ibqp;

err_free_tx:
	mutex_unlock(&dev->devr.mutex);
	kfree(gsi->tx_qps);
err_free:
	kfree(gsi);
	return ERR_PTR(ret);
}

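/* Tear down the software GSI QP: destroy the hardware GSI QP first (failing
 * out early if that fails), unregister it from the port, then destroy any TX
 * QPs that setup_qp() created and free the wrapper.
 */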
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	const int port_num = gsi->port_num;
	int qp_index;
	int ret;

	mlx5_ib_dbg(dev, "destroying GSI QP\n");

	mutex_lock(&dev->devr.mutex);
	ret = ib_destroy_qp(gsi->rx_qp);
	if (ret) {
		mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
			     ret);
		mutex_unlock(&dev->devr.mutex);
		return ret;
	}
	dev->devr.ports[port_num - 1].gsi = NULL;
	mutex_unlock(&dev->devr.mutex);
	gsi->rx_qp = NULL;

	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
		if (!gsi->tx_qps[qp_index])
			continue;
		WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
		gsi->tx_qps[qp_index] = NULL;
	}

	kfree(gsi->tx_qps);
	kfree(gsi);

	return 0;
}

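/* Create one transmission UD QP that mirrors the consumer's GSI QP
 * attributes (CQs, capabilities, signaling type) and is flagged via
 * mlx5_ib_create_qp_sqpn_qp1() to send with a source QPN of 1.
 */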
static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
{
	struct ib_pd *pd = gsi->rx_qp->pd;
	struct ib_qp_init_attr init_attr = {
		.event_handler = gsi->rx_qp->event_handler,
		.qp_context = gsi->rx_qp->qp_context,
		.send_cq = gsi->rx_qp->send_cq,
		.recv_cq = gsi->rx_qp->recv_cq,
		.cap = {
			.max_send_wr = gsi->cap.max_send_wr,
			.max_send_sge = gsi->cap.max_send_sge,
			.max_inline_data = gsi->cap.max_inline_data,
		},
		.sq_sig_type = gsi->sq_sig_type,
		.qp_type = IB_QPT_UD,
		.create_flags = mlx5_ib_create_qp_sqpn_qp1(),
	};

	return ib_create_qp(pd, &init_attr);
}

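/* Bring a TX QP through INIT->RTR->RTS. The QP's P_Key index is taken from
 * its position in the tx_qps array, so TX QP i always sends with P_Key
 * index i.
 */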
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
			 u16 qp_index)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct ib_qp_attr attr;
	int mask;
	int ret;

	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = qp_index;
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = gsi->port_num;
	ret = ib_modify_qp(qp, &attr, mask);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	return 0;
}

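/* Lazily create and initialize the TX QP for one P_Key index. Indexes with
 * an unreadable or zero (invalid) P_Key are skipped, as are slots that
 * already hold a QP; the new QP is only published in tx_qps under gsi->lock
 * once it has reached RTS.
 */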
static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
	struct ib_device *device = gsi->rx_qp->device;
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct ib_qp *qp;
	unsigned long flags;
	u16 pkey;
	int ret;

	ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
	if (ret) {
		mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
			     gsi->port_num, qp_index);
		return;
	}

	if (!pkey) {
		mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
			    gsi->port_num, qp_index);
		return;
	}

	spin_lock_irqsave(&gsi->lock, flags);
	qp = gsi->tx_qps[qp_index];
	spin_unlock_irqrestore(&gsi->lock, flags);
	if (qp) {
		mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
			    gsi->port_num, qp_index);
		return;
	}

	qp = create_gsi_ud_qp(gsi);
	if (IS_ERR(qp)) {
		mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
			     PTR_ERR(qp));
		return;
	}

	ret = modify_to_rts(gsi, qp, qp_index);
	if (ret)
		goto err_destroy_qp;

	spin_lock_irqsave(&gsi->lock, flags);
	WARN_ON_ONCE(gsi->tx_qps[qp_index]);
	gsi->tx_qps[qp_index] = qp;
	spin_unlock_irqrestore(&gsi->lock, flags);

	return;

err_destroy_qp:
	/* modify_to_rts() failed; destroy the half-initialized TX QP instead
	 * of leaking it, and warn if the destroy itself fails.
	 */
	WARN_ON_ONCE(ib_destroy_qp(qp));
}

static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
{
	u16 qp_index;

	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
		setup_qp(gsi, qp_index);
}

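/* Modify the software GSI QP by modifying the underlying hardware GSI QP;
 * once that QP reaches RTS, set up the per-P_Key TX QPs as well.
 */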
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	int ret;

	mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);

	mutex_lock(&gsi->mutex);
	ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
	if (ret) {
		mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
		goto unlock;
	}

	if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
		setup_qps(gsi);

unlock:
	mutex_unlock(&gsi->mutex);

	return ret;
}

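/* Query the underlying hardware GSI QP, but report the capabilities the
 * consumer originally requested rather than those of the hardware QP.
 */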
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	int ret;

	mutex_lock(&gsi->mutex);
	ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
	qp_init_attr->cap = gsi->cap;
	mutex_unlock(&gsi->mutex);

	return ret;
}

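/* Post send and receive work requests directly on the hardware GSI QP. Note
 * that sends are not (yet) routed through the per-P_Key TX QPs here.
 */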
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);

	return ib_post_send(gsi->rx_qp, wr, bad_wr);
}

int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);

	return ib_post_recv(gsi->rx_qp, wr, bad_wr);
}