Ram Amrani51ff1722016-10-01 21:59:57 +03001/* QLogic qed NIC Driver
Mintz, Yuvale8f1cb52017-01-01 13:57:00 +02002 * Copyright (c) 2015-2017 QLogic Corporation
Ram Amrani51ff1722016-10-01 21:59:57 +03003 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include <asm/byteorder.h>
34#include <linux/bitops.h>
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37#include <linux/errno.h>
Ram Amrani51ff1722016-10-01 21:59:57 +030038#include <linux/if_ether.h>
39#include <linux/if_vlan.h>
40#include <linux/io.h>
41#include <linux/ip.h>
42#include <linux/ipv6.h>
43#include <linux/kernel.h>
44#include <linux/list.h>
45#include <linux/module.h>
46#include <linux/mutex.h>
47#include <linux/pci.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
50#include <linux/string.h>
51#include <linux/tcp.h>
Ram Amrani51ff1722016-10-01 21:59:57 +030053#include <linux/qed/qed_roce_if.h>
55#include "qed.h"
56#include "qed_cxt.h"
57#include "qed_hsi.h"
58#include "qed_hw.h"
59#include "qed_init_ops.h"
60#include "qed_int.h"
61#include "qed_ll2.h"
62#include "qed_mcp.h"
63#include "qed_reg_addr.h"
64#include "qed_sp.h"
65#include "qed_roce.h"
Michal Kalderon0518c122017-06-09 17:13:22 +030067#include <linux/qed/qed_ll2_if.h>
Ram Amrani51ff1722016-10-01 21:59:57 +030068
Mintz, Yuvalbe086e72017-03-11 18:39:18 +020069static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
Ram Amrani51ff1722016-10-01 21:59:57 +030070
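/* Handle a RoCE async event from the EQ: DESTROY_QP_DONE releases the
 * real icid that was offloaded to the FW; any other event is forwarded
 * to the protocol driver via the affiliated-event callback.
 */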
Mintz, Yuvalbe086e72017-03-11 18:39:18 +020071void qed_roce_async_event(struct qed_hwfn *p_hwfn,
72 u8 fw_event_code, union rdma_eqe_data *rdma_data)
73{
74 if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
75 u16 icid =
76 (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
77
78 /* icid release in this async event can occur only if the icid
79 * was offloaded to the FW. In case it wasn't offloaded this is
80 * handled in qed_roce_sp_destroy_qp.
81 */
82 qed_roce_free_real_icid(p_hwfn, icid);
83 } else {
84 struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
85
86 events->affiliated_event(p_hwfn->p_rdma_info->events.context,
87 fw_event_code,
88 &rdma_data->async_handle);
89 }
Ram Amrani51ff1722016-10-01 21:59:57 +030090}
91
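/* Allocate and zero a named bitmap with 'max_count' bits. */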
92static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
Ram Amranie015d582017-04-30 11:49:08 +030093 struct qed_bmap *bmap, u32 max_count, char *name)
Ram Amrani51ff1722016-10-01 21:59:57 +030094{
95 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
96
97 bmap->max_count = max_count;
98
99 bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
100 GFP_KERNEL);
101 if (!bmap->bitmap) {
102 DP_NOTICE(p_hwfn,
103 "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
104 return -ENOMEM;
105 }
106
Ram Amranie015d582017-04-30 11:49:08 +0300107 snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
108
109 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap allocated\n", name);
Ram Amrani51ff1722016-10-01 21:59:57 +0300110 return 0;
111}
112
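/* Find and mark the first free bit in the bitmap; returns -EINVAL when
 * the bitmap is exhausted. Callers are expected to hold the rdma_info lock.
 */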
113static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
114 struct qed_bmap *bmap, u32 *id_num)
115{
Ram Amrani51ff1722016-10-01 21:59:57 +0300116 *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
Ram Amranie015d582017-04-30 11:49:08 +0300117 if (*id_num >= bmap->max_count)
Ram Amrani51ff1722016-10-01 21:59:57 +0300118 return -EINVAL;
Ram Amrani51ff1722016-10-01 21:59:57 +0300119
120 __set_bit(*id_num, bmap->bitmap);
121
Ram Amranie015d582017-04-30 11:49:08 +0300122 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
123 bmap->name, *id_num);
124
Ram Amrani51ff1722016-10-01 21:59:57 +0300125 return 0;
126}
127
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200128static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
129 struct qed_bmap *bmap, u32 id_num)
130{
131 if (id_num >= bmap->max_count)
132 return;
133
134 __set_bit(id_num, bmap->bitmap);
135}
136
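/* Clear a previously allocated id; warn if it was already released. */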
Ram Amrani51ff1722016-10-01 21:59:57 +0300137static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
138 struct qed_bmap *bmap, u32 id_num)
139{
140 bool b_acquired;
141
Ram Amrani51ff1722016-10-01 21:59:57 +0300142 if (id_num >= bmap->max_count)
143 return;
144
145 b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
146 if (!b_acquired) {
Ram Amranie015d582017-04-30 11:49:08 +0300147 DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
148 bmap->name, id_num);
Ram Amrani51ff1722016-10-01 21:59:57 +0300149 return;
150 }
Ram Amranie015d582017-04-30 11:49:08 +0300151
152 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
153 bmap->name, id_num);
Ram Amrani51ff1722016-10-01 21:59:57 +0300154}
155
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200156static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
157 struct qed_bmap *bmap, u32 id_num)
158{
159 if (id_num >= bmap->max_count)
160 return -1;
161
162 return test_bit(id_num, bmap->bitmap);
163}
164
Mintz, Yuval9331dad2017-06-20 16:00:02 +0300165static bool qed_bmap_is_empty(struct qed_bmap *bmap)
166{
167 return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
168}
169
Yuval Mintz0189efb2016-10-13 22:57:02 +0300170static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
Ram Amrani51ff1722016-10-01 21:59:57 +0300171{
172 /* The first SB id for RoCE comes after all the L2 SBs */
173 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
174}
175
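/* Allocate the per-hwfn RDMA info structure together with all of the id
 * bitmaps (PD, DPI, CQ, toggle bits, MR/TID, CID, real CID) needed to run
 * RoCE on this PF.
 */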
Ram Amrani51ff1722016-10-01 21:59:57 +0300176static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
177 struct qed_ptt *p_ptt,
178 struct qed_rdma_start_in_params *params)
179{
180 struct qed_rdma_info *p_rdma_info;
181 u32 num_cons, num_tasks;
182 int rc = -ENOMEM;
183
184 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
185
186 /* Allocate a struct with current pf rdma info */
187 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
188 if (!p_rdma_info) {
189 DP_NOTICE(p_hwfn,
190 "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
191 rc);
192 return rc;
193 }
194
195 p_hwfn->p_rdma_info = p_rdma_info;
196 p_rdma_info->proto = PROTOCOLID_ROCE;
197
Yuval Mintz8c93bea2016-10-13 22:57:03 +0300198 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
199 NULL);
Ram Amrani51ff1722016-10-01 21:59:57 +0300200
201 p_rdma_info->num_qps = num_cons / 2;
202
203 num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
204
205 /* Each MR uses a single task */
206 p_rdma_info->num_mrs = num_tasks;
207
208 /* Queue zone lines are shared between RoCE and L2 in such a way that
209 * they can be used by each without obstructing the other.
210 */
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200211 p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
212 p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
Ram Amrani51ff1722016-10-01 21:59:57 +0300213
214 /* Allocate a struct with device params and fill it */
215 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
216 if (!p_rdma_info->dev) {
217 DP_NOTICE(p_hwfn,
218 "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
219 rc);
220 goto free_rdma_info;
221 }
222
223 /* Allocate a struct with port params and fill it */
224 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
225 if (!p_rdma_info->port) {
226 DP_NOTICE(p_hwfn,
227 "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
228 rc);
229 goto free_rdma_dev;
230 }
231
232 /* Allocate bit map for pd's */
Ram Amranie015d582017-04-30 11:49:08 +0300233 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
234 "PD");
Ram Amrani51ff1722016-10-01 21:59:57 +0300235 if (rc) {
236 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
237 "Failed to allocate pd_map, rc = %d\n",
238 rc);
239 goto free_rdma_port;
240 }
241
242 /* Allocate DPI bitmap */
243 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
Ram Amranie015d582017-04-30 11:49:08 +0300244 p_hwfn->dpi_count, "DPI");
Ram Amrani51ff1722016-10-01 21:59:57 +0300245 if (rc) {
246 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
247 "Failed to allocate DPI bitmap, rc = %d\n", rc);
248 goto free_pd_map;
249 }
250
251 /* Allocate bitmap for CQs. The maximum number of CQs is bounded by
 252 * twice the number of QPs.
253 */
254 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
Ram Amranie015d582017-04-30 11:49:08 +0300255 p_rdma_info->num_qps * 2, "CQ");
Ram Amrani51ff1722016-10-01 21:59:57 +0300256 if (rc) {
257 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
258 "Failed to allocate cq bitmap, rc = %d\n", rc);
259 goto free_dpi_map;
260 }
261
262 /* Allocate bitmap for the toggle bit of each CQ icid.
 263 * We toggle the bit every time we create or resize a CQ for a given icid.
 264 * The maximum number of CQs is bounded by twice the number of QPs.
265 */
266 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
Ram Amranie015d582017-04-30 11:49:08 +0300267 p_rdma_info->num_qps * 2, "Toggle");
Ram Amrani51ff1722016-10-01 21:59:57 +0300268 if (rc) {
269 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
270 "Failed to allocate toogle bits, rc = %d\n", rc);
271 goto free_cq_map;
272 }
273
274 /* Allocate bitmap for itids */
275 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
Ram Amranie015d582017-04-30 11:49:08 +0300276 p_rdma_info->num_mrs, "MR");
Ram Amrani51ff1722016-10-01 21:59:57 +0300277 if (rc) {
278 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
279 "Failed to allocate itids bitmaps, rc = %d\n", rc);
280 goto free_toggle_map;
281 }
282
283 /* Allocate bitmap for cids used for qps. */
Ram Amranie015d582017-04-30 11:49:08 +0300284 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
285 "CID");
Ram Amrani51ff1722016-10-01 21:59:57 +0300286 if (rc) {
287 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
288 "Failed to allocate cid bitmap, rc = %d\n", rc);
289 goto free_tid_map;
290 }
291
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200292 /* Allocate bitmap for cids used for responders/requesters. */
Ram Amranie015d582017-04-30 11:49:08 +0300293 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
294 "REAL_CID");
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200295 if (rc) {
296 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
297 "Failed to allocate real cid bitmap, rc = %d\n", rc);
298 goto free_cid_map;
299 }
Ram Amrani51ff1722016-10-01 21:59:57 +0300300 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
301 return 0;
302
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200303free_cid_map:
304 kfree(p_rdma_info->cid_map.bitmap);
Ram Amrani51ff1722016-10-01 21:59:57 +0300305free_tid_map:
306 kfree(p_rdma_info->tid_map.bitmap);
307free_toggle_map:
308 kfree(p_rdma_info->toggle_bits.bitmap);
309free_cq_map:
310 kfree(p_rdma_info->cq_map.bitmap);
311free_dpi_map:
312 kfree(p_rdma_info->dpi_map.bitmap);
313free_pd_map:
314 kfree(p_rdma_info->pd_map.bitmap);
315free_rdma_port:
316 kfree(p_rdma_info->port);
317free_rdma_dev:
318 kfree(p_rdma_info->dev);
319free_rdma_info:
320 kfree(p_rdma_info);
321
322 return rc;
323}
324
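/* Free a bitmap; when 'check' is set, dump any ids that are still
 * allocated before the memory is released.
 */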
Ram Amranie015d582017-04-30 11:49:08 +0300325static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
326 struct qed_bmap *bmap, bool check)
327{
328 int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
329 int last_line = bmap->max_count / (64 * 8);
330 int last_item = last_line * 8 +
331 DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
332 u64 *pmap = (u64 *)bmap->bitmap;
333 int line, item, offset;
334 u8 str_last_line[200] = { 0 };
335
336 if (!weight || !check)
337 goto end;
338
339 DP_NOTICE(p_hwfn,
340 "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
341 bmap->name, bmap->max_count, weight);
342
343 /* print aligned non-zero lines, if any */
344 for (item = 0, line = 0; line < last_line; line++, item += 8)
345 if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
346 DP_NOTICE(p_hwfn,
347 "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
348 line,
349 pmap[item],
350 pmap[item + 1],
351 pmap[item + 2],
352 pmap[item + 3],
353 pmap[item + 4],
354 pmap[item + 5],
355 pmap[item + 6], pmap[item + 7]);
356
357 /* print last unaligned non-zero line, if any */
358 if ((bmap->max_count % (64 * 8)) &&
359 (bitmap_weight((unsigned long *)&pmap[item],
360 bmap->max_count - item * 64))) {
361 offset = sprintf(str_last_line, "line 0x%04x: ", line);
362 for (; item < last_item; item++)
363 offset += sprintf(str_last_line + offset,
364 "0x%016llx ", pmap[item]);
365 DP_NOTICE(p_hwfn, "%s\n", str_last_line);
366 }
367
368end:
369 kfree(bmap->bitmap);
370 bmap->bitmap = NULL;
371}
372
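/* Release all RDMA resources. Waits (bounded) for pending async
 * destroy-QP completions to clear the real-cid bitmap before freeing.
 */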
Yuval Mintz0189efb2016-10-13 22:57:02 +0300373static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
Ram Amrani51ff1722016-10-01 21:59:57 +0300374{
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200375 struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
Ram Amrani51ff1722016-10-01 21:59:57 +0300376 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200377 int wait_count = 0;
378
379 /* When destroying a RoCE QP, control is returned to the user after
380 * the synchronous part. The asynchronous part may take a little longer.
381 * We delay for a short while if an async destroy QP is still expected.
382 * Beyond the added delay we clear the bitmap anyway.
383 */
384 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
385 msleep(100);
386 if (wait_count++ > 20) {
387 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
388 break;
389 }
390 }
Ram Amrani51ff1722016-10-01 21:59:57 +0300391
Ram Amranie015d582017-04-30 11:49:08 +0300392 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
393 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
394 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
395 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
396 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
397 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
Ram Amrani51ff1722016-10-01 21:59:57 +0300398
399 kfree(p_rdma_info->port);
400 kfree(p_rdma_info->dev);
401
402 kfree(p_rdma_info);
403}
404
405static void qed_rdma_free(struct qed_hwfn *p_hwfn)
406{
407 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
408
409 qed_rdma_resc_free(p_hwfn);
410}
411
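/* Build an EUI-64 GUID from the port MAC address: flip the
 * universal/local bit and insert 0xff, 0xfe in the middle.
 */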
412static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
413{
414 guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
415 guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
416 guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
417 guid[3] = 0xff;
418 guid[4] = 0xfe;
419 guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
420 guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
421 guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
422}
423
424static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
425 struct qed_rdma_start_in_params *params)
426{
427 struct qed_rdma_events *events;
428
429 events = &p_hwfn->p_rdma_info->events;
430
431 events->unaffiliated_event = params->events->unaffiliated_event;
432 events->affiliated_event = params->events->affiliated_event;
433 events->context = params->events->context;
434}
435
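/* Fill the qed_rdma_device capability structure (QP/CQ/MR limits,
 * atomic support, etc.) from FW constants, resource counts and PCI
 * config space.
 */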
436static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
437 struct qed_rdma_start_in_params *params)
438{
439 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
440 struct qed_dev *cdev = p_hwfn->cdev;
441 u32 pci_status_control;
442 u32 num_qps;
443
444 /* Vendor specific information */
445 dev->vendor_id = cdev->vendor_id;
446 dev->vendor_part_id = cdev->device_id;
447 dev->hw_ver = 0;
448 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
449 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
450
451 qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
452 dev->node_guid = dev->sys_image_guid;
453
454 dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
455 RDMA_MAX_SGE_PER_RQ_WQE);
456
457 if (cdev->rdma_max_sge)
458 dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
459
460 dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
461
462 dev->max_inline = (cdev->rdma_max_inline) ?
463 min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
464 dev->max_inline;
465
466 dev->max_wqe = QED_RDMA_MAX_WQE;
467 dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
468
469 /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
470 * it is up-aligned to 16 and then to ILT page size within qed cxt.
471 * This is OK in terms of ILT but we don't want to configure the FW
472 * above its abilities
473 */
474 num_qps = ROCE_MAX_QPS;
475 num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
476 dev->max_qp = num_qps;
477
478 /* CQs use the same icids that QPs use, hence they are limited by the
479 * number of icids. There are two icids per QP.
480 */
481 dev->max_cq = num_qps * 2;
482
483 /* The number of mrs is smaller by 1 since the first is reserved */
484 dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
485 dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
486
487 /* The maximum CQE capacity per CQ supported.
 488 * The max number of CQEs assumes a two-level PBL:
 489 * 8 is the pointer size in bytes,
 490 * 32 is the size of a CQ element in bytes.
 491 */
492 if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
493 dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
494 else
495 dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
496
497 dev->max_mw = 0;
498 dev->max_fmr = QED_RDMA_MAX_FMR;
499 dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
500 dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
501 dev->max_pkey = QED_RDMA_MAX_P_KEY;
502
503 dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
504 (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
505 dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
506 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
507 dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
508 p_hwfn->p_rdma_info->num_qps;
509 dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
510 dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
511 dev->max_pd = RDMA_MAX_PDS;
512 dev->max_ah = p_hwfn->p_rdma_info->num_qps;
513 dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
514
515 /* Set capabilities */
516 dev->dev_caps = 0;
517 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
518 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
519 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
520 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
521 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
522 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
523 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
524 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
525
526 /* Check atomic operations support in PCI configuration space. */
527 pci_read_config_dword(cdev->pdev,
528 cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
529 &pci_status_control);
530
531 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
532 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
533}
534
535static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
536{
537 struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
538 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
539
540 port->port_state = p_hwfn->mcp_info->link_output.link_up ?
541 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
542
543 port->max_msg_size = min_t(u64,
544 (dev->max_mr_mw_fmr_size *
545 p_hwfn->cdev->rdma_max_sge),
546 BIT(31));
547
548 port->pkey_bad_counter = 0;
549}
550
551static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
552{
553 u32 ll2_ethertype_en;
554
555 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
556 p_hwfn->b_rdma_enabled_in_prs = false;
557
558 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
559
560 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
561
562 /* We delay writing to this reg until first cid is allocated. See
563 * qed_cxt_dynamic_ilt_alloc function for more details
564 */
565 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
566 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
567 (ll2_ethertype_en | 0x01));
568
569 if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
570 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
571 return -EINVAL;
572 }
573
574 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
575 return 0;
576}
577
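/* Send the RDMA function-init ramrod that starts the FW RoCE function
 * and configures the requested CNQs.
 */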
578static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
579 struct qed_rdma_start_in_params *params,
580 struct qed_ptt *p_ptt)
581{
582 struct rdma_init_func_ramrod_data *p_ramrod;
583 struct qed_rdma_cnq_params *p_cnq_pbl_list;
584 struct rdma_init_func_hdr *p_params_header;
585 struct rdma_cnq_params *p_cnq_params;
586 struct qed_sp_init_data init_data;
587 struct qed_spq_entry *p_ent;
588 u32 cnq_id, sb_id;
Mintz, Yuval50a20712017-06-01 15:29:09 +0300589 u16 igu_sb_id;
Ram Amrani51ff1722016-10-01 21:59:57 +0300590 int rc;
591
592 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
593
594 /* Save the number of cnqs for the function close ramrod */
595 p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
596
597 /* Get SPQ entry */
598 memset(&init_data, 0, sizeof(init_data));
599 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
600 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
601
602 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
603 p_hwfn->p_rdma_info->proto, &init_data);
604 if (rc)
605 return rc;
606
607 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
608
609 p_params_header = &p_ramrod->params_header;
610 p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
611 QED_RDMA_CNQ_RAM);
612 p_params_header->num_cnqs = params->desired_cnq;
613
614 if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
615 p_params_header->cq_ring_mode = 1;
616 else
617 p_params_header->cq_ring_mode = 0;
618
619 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
620 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
Mintz, Yuval50a20712017-06-01 15:29:09 +0300621 igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
622 p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
Ram Amrani51ff1722016-10-01 21:59:57 +0300623 p_cnq_params = &p_ramrod->cnq_params[cnq_id];
624 p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
Ram Amrani51ff1722016-10-01 21:59:57 +0300625
626 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
627 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
628
629 DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
630 p_cnq_pbl_list->pbl_ptr);
631
632 /* we assume here that cnq_id and qz_offset are the same */
633 p_cnq_params->queue_zone_num =
634 cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
635 cnq_id);
636 }
637
638 return qed_spq_post(p_hwfn, p_ent, NULL);
639}
640
Yuval Mintz0189efb2016-10-13 22:57:02 +0300641static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
642{
643 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
644 int rc;
645
646 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
647
648 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
649 rc = qed_rdma_bmap_alloc_id(p_hwfn,
650 &p_hwfn->p_rdma_info->tid_map, itid);
651 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
652 if (rc)
653 goto out;
654
655 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
656out:
657 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
658 return rc;
659}
660
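/* Reserve DPI 0 and TID 0 for the kernel; TID 0 backs the reserved
 * lkey and must come out equal to RDMA_RESERVED_LKEY.
 */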
Ram Amrani51ff1722016-10-01 21:59:57 +0300661static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
662{
663 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
664
665 /* The first DPI is reserved for the Kernel */
666 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
667
668 /* Tid 0 will be used as the key for "reserved MR".
669 * The driver should allocate memory for it so it can be loaded but no
670 * ramrod should be passed on it.
671 */
672 qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
673 if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
674 DP_NOTICE(p_hwfn,
675 "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
676 return -EINVAL;
677 }
678
679 return 0;
680}
681
682static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
683 struct qed_ptt *p_ptt,
684 struct qed_rdma_start_in_params *params)
685{
686 int rc;
687
688 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
689
690 spin_lock_init(&p_hwfn->p_rdma_info->lock);
691
692 qed_rdma_init_devinfo(p_hwfn, params);
693 qed_rdma_init_port(p_hwfn);
694 qed_rdma_init_events(p_hwfn, params);
695
696 rc = qed_rdma_reserve_lkey(p_hwfn);
697 if (rc)
698 return rc;
699
700 rc = qed_rdma_init_hw(p_hwfn, p_ptt);
701 if (rc)
702 return rc;
703
704 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
705}
706
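/* Stop RoCE: disable RoCE search and the light-L2 RoCE ethertype in the
 * parser, send the function-close ramrod and free all RDMA resources.
 */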
Yuval Mintz0189efb2016-10-13 22:57:02 +0300707static int qed_rdma_stop(void *rdma_cxt)
Ram Amrani51ff1722016-10-01 21:59:57 +0300708{
709 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
710 struct rdma_close_func_ramrod_data *p_ramrod;
711 struct qed_sp_init_data init_data;
712 struct qed_spq_entry *p_ent;
713 struct qed_ptt *p_ptt;
714 u32 ll2_ethertype_en;
715 int rc = -EBUSY;
716
717 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
718
719 p_ptt = qed_ptt_acquire(p_hwfn);
720 if (!p_ptt) {
721 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
722 return rc;
723 }
724
725 /* Disable RoCE search */
726 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
727 p_hwfn->b_rdma_enabled_in_prs = false;
728
729 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
730
731 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
732
733 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
734 (ll2_ethertype_en & 0xFFFE));
735
736 qed_ptt_release(p_hwfn, p_ptt);
737
738 /* Get SPQ entry */
739 memset(&init_data, 0, sizeof(init_data));
740 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
741 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
742
743 /* Stop RoCE */
744 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
745 p_hwfn->p_rdma_info->proto, &init_data);
746 if (rc)
747 goto out;
748
749 p_ramrod = &p_ent->ramrod.rdma_close_func;
750
751 p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
752 p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
753
754 rc = qed_spq_post(p_hwfn, p_ent, NULL);
755
756out:
757 qed_rdma_free(p_hwfn);
758
759 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
760 return rc;
761}
762
Yuval Mintz0189efb2016-10-13 22:57:02 +0300763static int qed_rdma_add_user(void *rdma_cxt,
764 struct qed_rdma_add_user_out_params *out_params)
Ram Amrani51ff1722016-10-01 21:59:57 +0300765{
766 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
767 u32 dpi_start_offset;
768 u32 returned_id = 0;
769 int rc;
770
771 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
772
773 /* Allocate DPI */
774 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
775 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
776 &returned_id);
777 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
778
779 out_params->dpi = (u16)returned_id;
780
781 /* Calculate the corresponding DPI address */
782 dpi_start_offset = p_hwfn->dpi_start_offset;
783
784 out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
785 dpi_start_offset +
786 ((out_params->dpi) * p_hwfn->dpi_size));
787
788 out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
789 dpi_start_offset +
790 ((out_params->dpi) * p_hwfn->dpi_size);
791
792 out_params->dpi_size = p_hwfn->dpi_size;
Ram Amrani20b1bd92017-04-30 11:49:10 +0300793 out_params->wid_count = p_hwfn->wid_count;
Ram Amrani51ff1722016-10-01 21:59:57 +0300794
795 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
796 return rc;
797}
798
Yuval Mintz0189efb2016-10-13 22:57:02 +0300799static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
Ram Amranic295f862016-10-01 21:59:58 +0300800{
801 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
802 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
803
804 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
805
806 /* Link may have changed */
807 p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
808 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
809
810 p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
811
Ram Amrani793ea8a2017-04-30 11:49:05 +0300812 p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
813
Ram Amranic295f862016-10-01 21:59:58 +0300814 return p_port;
815}
816
Yuval Mintz0189efb2016-10-13 22:57:02 +0300817static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
Ram Amrani51ff1722016-10-01 21:59:57 +0300818{
819 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
820
821 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
822
823 /* Return struct with device parameters */
824 return p_hwfn->p_rdma_info->dev;
825}
826
Yuval Mintz0189efb2016-10-13 22:57:02 +0300827static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
Ram Amraniee8eaea2016-10-01 22:00:00 +0300828{
829 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
830
831 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
832
833 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
834 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
835 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
836}
837
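/* Reflect a CNQ producer update to the FW through the USTORM common
 * queue-zone area for the given queue-zone offset.
 */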
Yuval Mintz0189efb2016-10-13 22:57:02 +0300838static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
Ram Amrani51ff1722016-10-01 21:59:57 +0300839{
840 struct qed_hwfn *p_hwfn;
841 u16 qz_num;
842 u32 addr;
843
844 p_hwfn = (struct qed_hwfn *)rdma_cxt;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +0200845
846 if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
847 DP_NOTICE(p_hwfn,
848 "queue zone offset %d is too large (max is %d)\n",
849 qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
850 return;
851 }
852
Ram Amrani51ff1722016-10-01 21:59:57 +0300853 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
854 addr = GTT_BAR0_MAP_REG_USDM_RAM +
855 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
856
857 REG_WR16(p_hwfn, addr, prod);
858
859 /* keep prod updates ordered */
860 wmb();
861}
862
863static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
864 struct qed_dev_rdma_info *info)
865{
Ram Amrani20b1bd92017-04-30 11:49:10 +0300866 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
867
Ram Amrani51ff1722016-10-01 21:59:57 +0300868 memset(info, 0, sizeof(*info));
869
870 info->rdma_type = QED_RDMA_TYPE_ROCE;
Ram Amrani20b1bd92017-04-30 11:49:10 +0300871 info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
Ram Amrani51ff1722016-10-01 21:59:57 +0300872
873 qed_fill_dev_info(cdev, &info->common);
874
875 return 0;
876}
877
878static int qed_rdma_get_sb_start(struct qed_dev *cdev)
879{
880 int feat_num;
881
882 if (cdev->num_hwfns > 1)
883 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
884 else
885 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
886 cdev->num_hwfns;
887
888 return feat_num;
889}
890
891static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
892{
893 int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
894 int n_msix = cdev->int_params.rdma_msix_cnt;
895
896 return min_t(int, n_cnq, n_msix);
897}
898
899static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
900{
901 int limit = 0;
902
903 /* Mark the fastpath as free/used */
904 cdev->int_params.fp_initialized = cnt ? true : false;
905
906 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
907 DP_ERR(cdev,
908 "qed roce supports only MSI-X interrupts (detected %d).\n",
909 cdev->int_params.out.int_mode);
910 return -EINVAL;
911 } else if (cdev->int_params.fp_msix_cnt) {
912 limit = cdev->int_params.rdma_msix_cnt;
913 }
914
915 if (!limit)
916 return -ENOMEM;
917
918 return min_t(int, cnt, limit);
919}
920
921static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
922{
923 memset(info, 0, sizeof(*info));
924
925 if (!cdev->int_params.fp_initialized) {
926 DP_INFO(cdev,
927 "Protocol driver requested interrupt information, but its support is not yet configured\n");
928 return -EINVAL;
929 }
930
931 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
932 int msix_base = cdev->int_params.rdma_msix_base;
933
934 info->msix_cnt = cdev->int_params.rdma_msix_cnt;
935 info->msix = &cdev->int_params.msix_table[msix_base];
936
937 DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
938 info->msix_cnt, msix_base);
939 }
940
941 return 0;
942}
943
Yuval Mintz0189efb2016-10-13 22:57:02 +0300944static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
Ram Amranic295f862016-10-01 21:59:58 +0300945{
946 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
947 u32 returned_id;
948 int rc;
949
950 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
951
952 /* Allocates an unused protection domain */
953 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
954 rc = qed_rdma_bmap_alloc_id(p_hwfn,
955 &p_hwfn->p_rdma_info->pd_map, &returned_id);
956 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
957
958 *pd = (u16)returned_id;
959
960 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
961 return rc;
962}
963
Yuval Mintz8c93bea2016-10-13 22:57:03 +0300964static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
Ram Amranic295f862016-10-01 21:59:58 +0300965{
966 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
967
968 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
969
970 /* Returns a previously allocated protection domain for reuse */
971 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
972 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
973 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
974}
975
976static enum qed_rdma_toggle_bit
977qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
978{
979 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
980 enum qed_rdma_toggle_bit toggle_bit;
981 u32 bmap_id;
982
983 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
984
985 /* The function toggles the bit related to a given icid
986 * and returns the new toggle bit's value
987 */
988 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
989
990 spin_lock_bh(&p_info->lock);
991 toggle_bit = !test_and_change_bit(bmap_id,
992 p_info->toggle_bits.bitmap);
993 spin_unlock_bh(&p_info->lock);
994
995 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
996 toggle_bit);
997
998 return toggle_bit;
999}
1000
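/* Allocate a CQ icid, make sure its ILT page exists and send the
 * create-CQ ramrod to the FW.
 */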
Yuval Mintz8c93bea2016-10-13 22:57:03 +03001001static int qed_rdma_create_cq(void *rdma_cxt,
1002 struct qed_rdma_create_cq_in_params *params,
1003 u16 *icid)
Ram Amranic295f862016-10-01 21:59:58 +03001004{
1005 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1006 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
1007 struct rdma_create_cq_ramrod_data *p_ramrod;
1008 enum qed_rdma_toggle_bit toggle_bit;
1009 struct qed_sp_init_data init_data;
1010 struct qed_spq_entry *p_ent;
1011 u32 returned_id, start_cid;
1012 int rc;
1013
1014 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
1015 params->cq_handle_hi, params->cq_handle_lo);
1016
1017 /* Allocate icid */
1018 spin_lock_bh(&p_info->lock);
Ram Amranie015d582017-04-30 11:49:08 +03001019 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
Ram Amranic295f862016-10-01 21:59:58 +03001020 spin_unlock_bh(&p_info->lock);
1021
1022 if (rc) {
1023 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
1024 return rc;
1025 }
1026
1027 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1028 p_info->proto);
1029 *icid = returned_id + start_cid;
1030
1031 /* Check if icid requires a page allocation */
1032 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
1033 if (rc)
1034 goto err;
1035
1036 /* Get SPQ entry */
1037 memset(&init_data, 0, sizeof(init_data));
1038 init_data.cid = *icid;
1039 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1040 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1041
1042 /* Send create CQ ramrod */
1043 rc = qed_sp_init_request(p_hwfn, &p_ent,
1044 RDMA_RAMROD_CREATE_CQ,
1045 p_info->proto, &init_data);
1046 if (rc)
1047 goto err;
1048
1049 p_ramrod = &p_ent->ramrod.rdma_create_cq;
1050
1051 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
1052 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
1053 p_ramrod->dpi = cpu_to_le16(params->dpi);
1054 p_ramrod->is_two_level_pbl = params->pbl_two_level;
1055 p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
1056 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
1057 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
1058 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
1059 params->cnq_id;
1060 p_ramrod->int_timeout = params->int_timeout;
1061
1062 /* toggle the bit for every resize or create cq for a given icid */
1063 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1064
1065 p_ramrod->toggle_bit = toggle_bit;
1066
1067 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1068 if (rc) {
1069 /* restore toggle bit */
1070 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1071 goto err;
1072 }
1073
1074 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
1075 return rc;
1076
1077err:
1078 /* release allocated icid */
Ram Amrani670dde52017-02-20 22:43:30 +02001079 spin_lock_bh(&p_info->lock);
Ram Amranic295f862016-10-01 21:59:58 +03001080 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
Ram Amrani670dde52017-02-20 22:43:30 +02001081 spin_unlock_bh(&p_info->lock);
Ram Amranic295f862016-10-01 21:59:58 +03001082 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
1083
1084 return rc;
1085}
1086
Yuval Mintz8c93bea2016-10-13 22:57:03 +03001087static int
1088qed_rdma_destroy_cq(void *rdma_cxt,
1089 struct qed_rdma_destroy_cq_in_params *in_params,
1090 struct qed_rdma_destroy_cq_out_params *out_params)
Ram Amranic295f862016-10-01 21:59:58 +03001091{
1092 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1093 struct rdma_destroy_cq_output_params *p_ramrod_res;
1094 struct rdma_destroy_cq_ramrod_data *p_ramrod;
1095 struct qed_sp_init_data init_data;
1096 struct qed_spq_entry *p_ent;
1097 dma_addr_t ramrod_res_phys;
1098 int rc = -ENOMEM;
1099
1100 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
1101
1102 p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 1103 sizeof(struct rdma_destroy_cq_output_params),
 1104 &ramrod_res_phys, GFP_KERNEL);
1107 if (!p_ramrod_res) {
1108 DP_NOTICE(p_hwfn,
1109 "qed destroy cq failed: cannot allocate memory (ramrod)\n");
1110 return rc;
1111 }
1112
1113 /* Get SPQ entry */
1114 memset(&init_data, 0, sizeof(init_data));
1115 init_data.cid = in_params->icid;
1116 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1117 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1118
1119 /* Send destroy CQ ramrod */
1120 rc = qed_sp_init_request(p_hwfn, &p_ent,
1121 RDMA_RAMROD_DESTROY_CQ,
1122 p_hwfn->p_rdma_info->proto, &init_data);
1123 if (rc)
1124 goto err;
1125
1126 p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1127 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1128
1129 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1130 if (rc)
1131 goto err;
1132
1133 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1134
1135 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1136 sizeof(struct rdma_destroy_cq_output_params),
1137 p_ramrod_res, ramrod_res_phys);
1138
1139 /* Free icid */
1140 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1141
1142 qed_bmap_release_id(p_hwfn,
1143 &p_hwfn->p_rdma_info->cq_map,
1144 (in_params->icid -
1145 qed_cxt_get_proto_cid_start(p_hwfn,
1146 p_hwfn->
1147 p_rdma_info->proto)));
1148
1149 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1150
1151 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1152 return rc;
1153
1154err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1155 sizeof(struct rdma_destroy_cq_output_params),
1156 p_ramrod_res, ramrod_res_phys);
1157
1158 return rc;
1159}
1160
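/* Pack a 6-byte MAC address into the three little-endian 16-bit words
 * the FW expects.
 */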
Ram Amranif1093942016-10-01 21:59:59 +03001161static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
1162{
1163 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1164 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1165 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1166}
1167
1168static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1169 __le32 *dst_gid)
1170{
1171 u32 i;
1172
1173 if (qp->roce_mode == ROCE_V2_IPV4) {
1174 /* The IPv4 addresses shall be aligned to the highest word.
1175 * The lower words must be zero.
1176 */
1177 memset(src_gid, 0, sizeof(union qed_gid));
1178 memset(dst_gid, 0, sizeof(union qed_gid));
1179 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
1180 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
1181 } else {
1182 /* GIDs and IPv6 addresses coincide in location and size */
1183 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
1184 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
1185 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
1186 }
1187 }
1188}
1189
1190static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1191{
1192 enum roce_flavor flavor;
1193
1194 switch (roce_mode) {
1195 case ROCE_V1:
1196 flavor = PLAIN_ROCE;
1197 break;
1198 case ROCE_V2_IPV4:
1199 flavor = RROCE_IPV4;
1200 break;
1201 case ROCE_V2_IPV6:
1202 flavor = RROCE_IPV6;
1203 break;
1204 default:
1205 flavor = MAX_ROCE_FLAVOR;
1206 break;
1207 }
1208 return flavor;
1209}
1210
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001211void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
1212{
1213 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1214 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
1215 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
1216 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1217}
1218
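/* Allocate a pair of adjacent icids for a QP: the even icid is used by
 * the responder and the odd one (icid + 1) by the requester.
 */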
Yuval Mintz8c93bea2016-10-13 22:57:03 +03001219static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
Ram Amranif1093942016-10-01 21:59:59 +03001220{
1221 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1222 u32 responder_icid;
1223 u32 requester_icid;
1224 int rc;
1225
1226 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1227 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1228 &responder_icid);
1229 if (rc) {
1230 spin_unlock_bh(&p_rdma_info->lock);
1231 return rc;
1232 }
1233
1234 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1235 &requester_icid);
1236
1237 spin_unlock_bh(&p_rdma_info->lock);
1238 if (rc)
1239 goto err;
1240
1241 /* The two icids should be adjacent */
1242 if ((requester_icid - responder_icid) != 1) {
1243 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
1244 rc = -EINVAL;
1245 goto err;
1246 }
1247
1248 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1249 p_rdma_info->proto);
1250 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1251 p_rdma_info->proto);
1252
1253 /* If these icids require a new ILT line allocate DMA-able context for
1254 * an ILT page
1255 */
1256 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
1257 if (rc)
1258 goto err;
1259
1260 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
1261 if (rc)
1262 goto err;
1263
1264 *cid = (u16)responder_icid;
1265 return rc;
1266
1267err:
1268 spin_lock_bh(&p_rdma_info->lock);
1269 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
1270 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
1271
1272 spin_unlock_bh(&p_rdma_info->lock);
1273 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1274 "Allocate CID - failed, rc = %d\n", rc);
1275 return rc;
1276}
1277
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001278static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
1279{
1280 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1281 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
1282 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1283}
1284
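/* Offload the responder side of a QP: allocate its IRQ ring and send
 * the FW create-QP ramrod with the responder parameters.
 */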
Ram Amranif1093942016-10-01 21:59:59 +03001285static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1286 struct qed_rdma_qp *qp)
1287{
1288 struct roce_create_qp_resp_ramrod_data *p_ramrod;
1289 struct qed_sp_init_data init_data;
Ram Amranif1093942016-10-01 21:59:59 +03001290 enum roce_flavor roce_flavor;
1291 struct qed_spq_entry *p_ent;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001292 u16 regular_latency_queue;
1293 enum protocol_type proto;
Ram Amranif1093942016-10-01 21:59:59 +03001294 int rc;
1295
1296 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1297
1298 /* Allocate DMA-able memory for IRQ */
1299 qp->irq_num_pages = 1;
1300 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1301 RDMA_RING_PAGE_SIZE,
1302 &qp->irq_phys_addr, GFP_KERNEL);
1303 if (!qp->irq) {
1304 rc = -ENOMEM;
1305 DP_NOTICE(p_hwfn,
1306 "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
1307 rc);
1308 return rc;
1309 }
1310
1311 /* Get SPQ entry */
1312 memset(&init_data, 0, sizeof(init_data));
1313 init_data.cid = qp->icid;
1314 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1315 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1316
1317 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
1318 PROTOCOLID_ROCE, &init_data);
1319 if (rc)
1320 goto err;
1321
1322 p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
1323
1324 p_ramrod->flags = 0;
1325
1326 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1327 SET_FIELD(p_ramrod->flags,
1328 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1329
1330 SET_FIELD(p_ramrod->flags,
1331 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1332 qp->incoming_rdma_read_en);
1333
1334 SET_FIELD(p_ramrod->flags,
1335 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1336 qp->incoming_rdma_write_en);
1337
1338 SET_FIELD(p_ramrod->flags,
1339 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1340 qp->incoming_atomic_en);
1341
1342 SET_FIELD(p_ramrod->flags,
1343 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1344 qp->e2e_flow_control_en);
1345
1346 SET_FIELD(p_ramrod->flags,
1347 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
1348
1349 SET_FIELD(p_ramrod->flags,
1350 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
1351 qp->fmr_and_reserved_lkey);
1352
1353 SET_FIELD(p_ramrod->flags,
1354 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1355 qp->min_rnr_nak_timer);
1356
1357 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1358 p_ramrod->traffic_class = qp->traffic_class_tos;
1359 p_ramrod->hop_limit = qp->hop_limit_ttl;
1360 p_ramrod->irq_num_pages = qp->irq_num_pages;
1361 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1362 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1363 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1364 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1365 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
1366 p_ramrod->pd = cpu_to_le16(qp->pd);
1367 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
1368 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
1369 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
1370 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1371 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1372 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1373 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1374 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
Ram Amranif1093942016-10-01 21:59:59 +03001375 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1376 qp->rq_cq_id);
1377
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001378 regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
Ram Amranif1093942016-10-01 21:59:59 +03001379
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001380 p_ramrod->regular_latency_phy_queue =
1381 cpu_to_le16(regular_latency_queue);
1382 p_ramrod->low_latency_phy_queue =
1383 cpu_to_le16(regular_latency_queue);
1384
Ram Amranif1093942016-10-01 21:59:59 +03001385 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1386
1387 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1388 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1389
1390 p_ramrod->udp_src_port = qp->udp_src_port;
1391 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1392 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
1393 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
1394
1395 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1396 qp->stats_queue;
1397
1398 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1399
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001400 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1401 "rc = %d regular physical queue = 0x%x\n", rc,
1402 regular_latency_queue);
Ram Amranif1093942016-10-01 21:59:59 +03001403
1404 if (rc)
1405 goto err;
1406
1407 qp->resp_offloaded = true;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001408 qp->cq_prod = 0;
1409
1410 proto = p_hwfn->p_rdma_info->proto;
1411 qed_roce_set_real_cid(p_hwfn, qp->icid -
1412 qed_cxt_get_proto_cid_start(p_hwfn, proto));
Ram Amranif1093942016-10-01 21:59:59 +03001413
1414 return rc;
1415
1416err:
1417 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
1418 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1419 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1420 qp->irq, qp->irq_phys_addr);
1421
1422 return rc;
1423}
1424
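/* Offload the requester side of a QP: allocate its ORQ ring and send
 * the FW create-QP ramrod with the requester parameters (uses icid + 1).
 */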
1425static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1426 struct qed_rdma_qp *qp)
1427{
1428 struct roce_create_qp_req_ramrod_data *p_ramrod;
1429 struct qed_sp_init_data init_data;
Ram Amranif1093942016-10-01 21:59:59 +03001430 enum roce_flavor roce_flavor;
1431 struct qed_spq_entry *p_ent;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001432 u16 regular_latency_queue;
1433 enum protocol_type proto;
Ram Amranif1093942016-10-01 21:59:59 +03001434 int rc;
1435
1436 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1437
1438 /* Allocate DMA-able memory for ORQ */
1439 qp->orq_num_pages = 1;
1440 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1441 RDMA_RING_PAGE_SIZE,
1442 &qp->orq_phys_addr, GFP_KERNEL);
1443 if (!qp->orq) {
1444 rc = -ENOMEM;
1445 DP_NOTICE(p_hwfn,
1446 "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
1447 rc);
1448 return rc;
1449 }
1450
1451 /* Get SPQ entry */
1452 memset(&init_data, 0, sizeof(init_data));
1453 init_data.cid = qp->icid + 1;
1454 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1455 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1456
1457 rc = qed_sp_init_request(p_hwfn, &p_ent,
1458 ROCE_RAMROD_CREATE_QP,
1459 PROTOCOLID_ROCE, &init_data);
1460 if (rc)
1461 goto err;
1462
1463 p_ramrod = &p_ent->ramrod.roce_create_qp_req;
1464
1465 p_ramrod->flags = 0;
1466
1467 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1468 SET_FIELD(p_ramrod->flags,
1469 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1470
1471 SET_FIELD(p_ramrod->flags,
1472 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
1473 qp->fmr_and_reserved_lkey);
1474
1475 SET_FIELD(p_ramrod->flags,
1476 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
1477
1478 SET_FIELD(p_ramrod->flags,
1479 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1480
1481 SET_FIELD(p_ramrod->flags,
1482 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1483 qp->rnr_retry_cnt);
1484
1485 p_ramrod->max_ord = qp->max_rd_atomic_req;
1486 p_ramrod->traffic_class = qp->traffic_class_tos;
1487 p_ramrod->hop_limit = qp->hop_limit_ttl;
1488 p_ramrod->orq_num_pages = qp->orq_num_pages;
1489 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1490 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1491 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1492 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1493 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1494 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
1495 p_ramrod->pd = cpu_to_le16(qp->pd);
1496 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
1497 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
1498 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
1499 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1500 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1501 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1502 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1503 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001504 p_ramrod->cq_cid =
1505 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
Ram Amranif1093942016-10-01 21:59:59 +03001506
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001507 regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
Ram Amranif1093942016-10-01 21:59:59 +03001508
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001509 p_ramrod->regular_latency_phy_queue =
1510 cpu_to_le16(regular_latency_queue);
1511 p_ramrod->low_latency_phy_queue =
1512 cpu_to_le16(regular_latency_queue);
1513
Ram Amranif1093942016-10-01 21:59:59 +03001514 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1515
1516 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1517 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1518
1519 p_ramrod->udp_src_port = qp->udp_src_port;
1520 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1521 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1522 qp->stats_queue;
1523
1524 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1525
1526 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1527
1528 if (rc)
1529 goto err;
1530
1531 qp->req_offloaded = true;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001532 proto = p_hwfn->p_rdma_info->proto;
1533 qed_roce_set_real_cid(p_hwfn,
1534 qp->icid + 1 -
1535 qed_cxt_get_proto_cid_start(p_hwfn, proto));
Ram Amranif1093942016-10-01 21:59:59 +03001536
1537 return rc;
1538
1539err:
1540 DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
1541 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1542 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1543 qp->orq, qp->orq_phys_addr);
1544 return rc;
1545}
1546
1547static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
1548 struct qed_rdma_qp *qp,
1549 bool move_to_err, u32 modify_flags)
1550{
1551 struct roce_modify_qp_resp_ramrod_data *p_ramrod;
1552 struct qed_sp_init_data init_data;
1553 struct qed_spq_entry *p_ent;
1554 int rc;
1555
1556 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1557
1558 if (move_to_err && !qp->resp_offloaded)
1559 return 0;
1560
1561 /* Get SPQ entry */
1562 memset(&init_data, 0, sizeof(init_data));
1563 init_data.cid = qp->icid;
1564 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1565 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1566
1567 rc = qed_sp_init_request(p_hwfn, &p_ent,
1568 ROCE_EVENT_MODIFY_QP,
1569 PROTOCOLID_ROCE, &init_data);
1570 if (rc) {
1571 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1572 return rc;
1573 }
1574
1575 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
1576
1577 p_ramrod->flags = 0;
1578
1579 SET_FIELD(p_ramrod->flags,
1580 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1581
1582 SET_FIELD(p_ramrod->flags,
1583 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1584 qp->incoming_rdma_read_en);
1585
1586 SET_FIELD(p_ramrod->flags,
1587 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1588 qp->incoming_rdma_write_en);
1589
1590 SET_FIELD(p_ramrod->flags,
1591 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1592 qp->incoming_atomic_en);
1593
1594 SET_FIELD(p_ramrod->flags,
1595 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1596 qp->e2e_flow_control_en);
1597
1598 SET_FIELD(p_ramrod->flags,
1599 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
1600 GET_FIELD(modify_flags,
1601 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
1602
1603 SET_FIELD(p_ramrod->flags,
1604 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
1605 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1606
1607 SET_FIELD(p_ramrod->flags,
1608 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1609 GET_FIELD(modify_flags,
1610 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1611
1612 SET_FIELD(p_ramrod->flags,
1613 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
1614 GET_FIELD(modify_flags,
1615 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
1616
1617 SET_FIELD(p_ramrod->flags,
1618 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
1619 GET_FIELD(modify_flags,
1620 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
1621
1622 p_ramrod->fields = 0;
1623 SET_FIELD(p_ramrod->fields,
1624 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1625 qp->min_rnr_nak_timer);
1626
1627 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1628 p_ramrod->traffic_class = qp->traffic_class_tos;
1629 p_ramrod->hop_limit = qp->hop_limit_ttl;
1630 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1631 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1632 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1633 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1634 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1635
1636 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
1637 return rc;
1638}
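
/* The ramrod "flags"/"fields" words above are packed with the
 * SET_FIELD()/GET_FIELD() helpers, which rely on <NAME>_MASK and <NAME>_SHIFT
 * constants from the HSI headers. A minimal illustrative sketch of that
 * convention follows; the EX_* field layout below is hypothetical and does
 * not exist in qed_hsi.h.
 */
#define EX_RETRY_CNT_MASK	0x7
#define EX_RETRY_CNT_SHIFT	0
#define EX_ERR_FLG_MASK		0x1
#define EX_ERR_FLG_SHIFT	3

static void example_field_packing(void)
{
	u8 fields = 0;

	SET_FIELD(fields, EX_RETRY_CNT, 5);	/* fields == 0x05 */
	SET_FIELD(fields, EX_ERR_FLG, 1);	/* fields == 0x0d */

	/* GET_FIELD() recovers the sub-fields that were packed above */
	WARN_ON(GET_FIELD(fields, EX_RETRY_CNT) != 5);
	WARN_ON(GET_FIELD(fields, EX_ERR_FLG) != 1);
}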
1639
1640static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
1641 struct qed_rdma_qp *qp,
1642 bool move_to_sqd,
1643 bool move_to_err, u32 modify_flags)
1644{
1645 struct roce_modify_qp_req_ramrod_data *p_ramrod;
1646 struct qed_sp_init_data init_data;
1647 struct qed_spq_entry *p_ent;
1648 int rc;
1649
1650 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1651
1652 if (move_to_err && !(qp->req_offloaded))
1653 return 0;
1654
1655 /* Get SPQ entry */
1656 memset(&init_data, 0, sizeof(init_data));
1657 init_data.cid = qp->icid + 1;
1658 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1659 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1660
1661 rc = qed_sp_init_request(p_hwfn, &p_ent,
1662 ROCE_EVENT_MODIFY_QP,
1663 PROTOCOLID_ROCE, &init_data);
1664 if (rc) {
1665 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1666 return rc;
1667 }
1668
1669 p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
1670
1671 p_ramrod->flags = 0;
1672
1673 SET_FIELD(p_ramrod->flags,
1674 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1675
1676 SET_FIELD(p_ramrod->flags,
1677 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
1678
1679 SET_FIELD(p_ramrod->flags,
1680 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
1681 qp->sqd_async);
1682
1683 SET_FIELD(p_ramrod->flags,
1684 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
1685 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1686
1687 SET_FIELD(p_ramrod->flags,
1688 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1689 GET_FIELD(modify_flags,
1690 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1691
1692 SET_FIELD(p_ramrod->flags,
1693 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
1694 GET_FIELD(modify_flags,
1695 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
1696
1697 SET_FIELD(p_ramrod->flags,
1698 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
1699 GET_FIELD(modify_flags,
1700 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
1701
1702 SET_FIELD(p_ramrod->flags,
1703 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
1704 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
1705
1706 SET_FIELD(p_ramrod->flags,
1707 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
1708 GET_FIELD(modify_flags,
1709 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
1710
1711 p_ramrod->fields = 0;
1712 SET_FIELD(p_ramrod->fields,
1713 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1714
1715 SET_FIELD(p_ramrod->fields,
1716 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1717 qp->rnr_retry_cnt);
1718
1719 p_ramrod->max_ord = qp->max_rd_atomic_req;
1720 p_ramrod->traffic_class = qp->traffic_class_tos;
1721 p_ramrod->hop_limit = qp->hop_limit_ttl;
1722 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1723 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1724 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1725 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1726 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1727 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1728
1729 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
1730 return rc;
1731}
1732
1733static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1734 struct qed_rdma_qp *qp,
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001735 u32 *num_invalidated_mw,
1736 u32 *cq_prod)
Ram Amranif1093942016-10-01 21:59:59 +03001737{
1738 struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1739 struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
1740 struct qed_sp_init_data init_data;
1741 struct qed_spq_entry *p_ent;
1742 dma_addr_t ramrod_res_phys;
1743 int rc;
1744
1745 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1746
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001747 *num_invalidated_mw = 0;
1748 *cq_prod = qp->cq_prod;
1749
1750 if (!qp->resp_offloaded) {
1751		/* If the responder was never offloaded, we need to free the cids
1752		 * allocated in create_qp, as a FW async event will never arrive
1753 */
1754 u32 cid;
1755
1756 cid = qp->icid -
1757 qed_cxt_get_proto_cid_start(p_hwfn,
1758 p_hwfn->p_rdma_info->proto);
1759 qed_roce_free_cid_pair(p_hwfn, (u16)cid);
1760
Ram Amranif1093942016-10-01 21:59:59 +03001761 return 0;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001762 }
Ram Amranif1093942016-10-01 21:59:59 +03001763
1764 /* Get SPQ entry */
1765 memset(&init_data, 0, sizeof(init_data));
1766 init_data.cid = qp->icid;
1767 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1768 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1769
1770 rc = qed_sp_init_request(p_hwfn, &p_ent,
1771 ROCE_RAMROD_DESTROY_QP,
1772 PROTOCOLID_ROCE, &init_data);
1773 if (rc)
1774 return rc;
1775
1776 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
1777
1778 p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
1779 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1780 &ramrod_res_phys, GFP_KERNEL);
1781
1782 if (!p_ramrod_res) {
1783 rc = -ENOMEM;
1784 DP_NOTICE(p_hwfn,
1785 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
1786 rc);
1787 return rc;
1788 }
1789
1790 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1791
1792 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1793 if (rc)
1794 goto err;
1795
1796 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001797 *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
1798 qp->cq_prod = *cq_prod;
Ram Amranif1093942016-10-01 21:59:59 +03001799
1800	/* Free IRQ - only if the ramrod succeeded; otherwise FW may still use it */
1801 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1802 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1803 qp->irq, qp->irq_phys_addr);
1804
1805 qp->resp_offloaded = false;
1806
1807 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
1808
1809err:
1810 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1811 sizeof(struct roce_destroy_qp_resp_output_params),
1812 p_ramrod_res, ramrod_res_phys);
1813
1814 return rc;
1815}
1816
1817static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
1818 struct qed_rdma_qp *qp,
1819 u32 *num_bound_mw)
1820{
1821 struct roce_destroy_qp_req_output_params *p_ramrod_res;
1822 struct roce_destroy_qp_req_ramrod_data *p_ramrod;
1823 struct qed_sp_init_data init_data;
1824 struct qed_spq_entry *p_ent;
1825 dma_addr_t ramrod_res_phys;
1826 int rc = -ENOMEM;
1827
1828 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1829
1830 if (!qp->req_offloaded)
1831 return 0;
1832
1833 p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
1834 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1835 sizeof(*p_ramrod_res),
1836 &ramrod_res_phys, GFP_KERNEL);
1837 if (!p_ramrod_res) {
1838 DP_NOTICE(p_hwfn,
1839 "qed destroy requester failed: cannot allocate memory (ramrod)\n");
1840 return rc;
1841 }
1842
1843 /* Get SPQ entry */
1844 memset(&init_data, 0, sizeof(init_data));
1845 init_data.cid = qp->icid + 1;
1846 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1847 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1848
1849 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
1850 PROTOCOLID_ROCE, &init_data);
1851 if (rc)
1852 goto err;
1853
1854 p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
1855 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1856
1857 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1858 if (rc)
1859 goto err;
1860
1861 *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
1862
1863	/* Free ORQ - only if the ramrod succeeded; otherwise FW may still use it */
1864 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1865 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1866 qp->orq, qp->orq_phys_addr);
1867
1868 qp->req_offloaded = false;
1869
1870 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
1871
1872err:
1873 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1874 p_ramrod_res, ramrod_res_phys);
1875
1876 return rc;
1877}
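
/* Both destroy ramrods above follow the same "output parameters" pattern:
 * allocate a small DMA-coherent result buffer, point the ramrod at it with
 * DMA_REGPAIR_LE(), post the ramrod (the callers use EBLOCK mode, so the
 * completion has arrived once qed_spq_post() returns), read the
 * little-endian result and free the buffer. An illustrative sketch of that
 * pattern; struct example_destroy_out is hypothetical, not a real HSI
 * structure, and the output address is assumed to be the usual struct
 * regpair field.
 */
struct example_destroy_out {
	__le32 value;
};

static int example_ramrod_with_output(struct qed_hwfn *p_hwfn,
				      struct qed_spq_entry *p_ent,
				      struct regpair *p_out_addr, u32 *p_value)
{
	struct example_destroy_out *p_res;
	dma_addr_t res_phys;
	int rc;

	p_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_res),
				   &res_phys, GFP_KERNEL);
	if (!p_res)
		return -ENOMEM;

	/* Tell the FW where to write its result */
	DMA_REGPAIR_LE(*p_out_addr, res_phys);

	/* p_ent is assumed to have been prepared with qed_sp_init_request()
	 * in QED_SPQ_MODE_EBLOCK, so the result is valid once we return.
	 */
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!rc)
		*p_value = le32_to_cpu(p_res->value);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_res),
			  p_res, res_phys);
	return rc;
}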
1878
Yuval Mintz8c93bea2016-10-13 22:57:03 +03001879static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1880 struct qed_rdma_qp *qp,
1881 struct qed_rdma_query_qp_out_params *out_params)
Ram Amranif1093942016-10-01 21:59:59 +03001882{
1883 struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1884 struct roce_query_qp_req_output_params *p_req_ramrod_res;
1885 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
1886 struct roce_query_qp_req_ramrod_data *p_req_ramrod;
1887 struct qed_sp_init_data init_data;
1888 dma_addr_t resp_ramrod_res_phys;
1889 dma_addr_t req_ramrod_res_phys;
1890 struct qed_spq_entry *p_ent;
1891 bool rq_err_state;
1892 bool sq_err_state;
1893 bool sq_draining;
1894 int rc = -ENOMEM;
1895
1896 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
1897		/* We can't send a ramrod to the FW since this QP wasn't
1898		 * offloaded to the FW yet; report the SW-tracked state instead
1899 */
1900 out_params->draining = false;
1901 out_params->rq_psn = qp->rq_psn;
1902 out_params->sq_psn = qp->sq_psn;
1903 out_params->state = qp->cur_state;
1904
1905 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
1906 return 0;
1907 }
1908
1909 if (!(qp->resp_offloaded)) {
1910 DP_NOTICE(p_hwfn,
1911			  "The responder's qp should be offloaded before the requester's\n");
1912 return -EINVAL;
1913 }
1914
1915 /* Send a query responder ramrod to FW to get RQ-PSN and state */
1916 p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
1917 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1918 sizeof(*p_resp_ramrod_res),
1919 &resp_ramrod_res_phys, GFP_KERNEL);
1920 if (!p_resp_ramrod_res) {
1921 DP_NOTICE(p_hwfn,
1922 "qed query qp failed: cannot allocate memory (ramrod)\n");
1923 return rc;
1924 }
1925
1926 /* Get SPQ entry */
1927 memset(&init_data, 0, sizeof(init_data));
1928 init_data.cid = qp->icid;
1929 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1930 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1931 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1932 PROTOCOLID_ROCE, &init_data);
1933 if (rc)
1934 goto err_resp;
1935
1936 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
1937 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
1938
1939 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1940 if (rc)
1941 goto err_resp;
1942
Ram Amranif1093942016-10-01 21:59:59 +03001943 out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
1944 rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
1945 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
1946
Ram Amranic5212b92017-02-20 22:43:31 +02001947 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1948 p_resp_ramrod_res, resp_ramrod_res_phys);
1949
Ram Amranif1093942016-10-01 21:59:59 +03001950 if (!(qp->req_offloaded)) {
1951 /* Don't send query qp for the requester */
1952 out_params->sq_psn = qp->sq_psn;
1953 out_params->draining = false;
1954
1955 if (rq_err_state)
1956 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1957
1958 out_params->state = qp->cur_state;
1959
1960 return 0;
1961 }
1962
1963 /* Send a query requester ramrod to FW to get SQ-PSN and state */
1964 p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
1965 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1966 sizeof(*p_req_ramrod_res),
1967 &req_ramrod_res_phys,
1968 GFP_KERNEL);
1969 if (!p_req_ramrod_res) {
1970 rc = -ENOMEM;
1971 DP_NOTICE(p_hwfn,
1972 "qed query qp failed: cannot allocate memory (ramrod)\n");
1973 return rc;
1974 }
1975
1976 /* Get SPQ entry */
1977 init_data.cid = qp->icid + 1;
1978 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1979 PROTOCOLID_ROCE, &init_data);
1980 if (rc)
1981 goto err_req;
1982
1983 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
1984 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
1985
1986 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1987 if (rc)
1988 goto err_req;
1989
Ram Amranif1093942016-10-01 21:59:59 +03001990 out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
1991 sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1992 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
1993 sq_draining =
1994 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1995 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
1996
Ram Amranic5212b92017-02-20 22:43:31 +02001997 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1998 p_req_ramrod_res, req_ramrod_res_phys);
1999
Ram Amranif1093942016-10-01 21:59:59 +03002000 out_params->draining = false;
2001
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002002 if (rq_err_state || sq_err_state)
Ram Amranif1093942016-10-01 21:59:59 +03002003 qp->cur_state = QED_ROCE_QP_STATE_ERR;
Ram Amranif1093942016-10-01 21:59:59 +03002004 else if (sq_draining)
2005 out_params->draining = true;
2006 out_params->state = qp->cur_state;
2007
2008 return 0;
2009
2010err_req:
2011 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
2012 p_req_ramrod_res, req_ramrod_res_phys);
2013 return rc;
2014err_resp:
2015 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
2016 p_resp_ramrod_res, resp_ramrod_res_phys);
2017 return rc;
2018}
2019
Yuval Mintz8c93bea2016-10-13 22:57:03 +03002020static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
Ram Amranif1093942016-10-01 21:59:59 +03002021{
2022 u32 num_invalidated_mw = 0;
2023 u32 num_bound_mw = 0;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002024 u32 cq_prod;
Ram Amranif1093942016-10-01 21:59:59 +03002025 int rc;
2026
2027 /* Destroys the specified QP */
2028 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
2029 (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
2030 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
2031 DP_NOTICE(p_hwfn,
2032 "QP must be in error, reset or init state before destroying it\n");
2033 return -EINVAL;
2034 }
2035
Ram Amrani300c0d72017-02-20 22:43:32 +02002036 if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
2037 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002038 &num_invalidated_mw,
2039 &cq_prod);
Ram Amrani300c0d72017-02-20 22:43:32 +02002040 if (rc)
2041 return rc;
Ram Amranif1093942016-10-01 21:59:59 +03002042
Ram Amrani300c0d72017-02-20 22:43:32 +02002043 /* Send destroy requester ramrod */
2044 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2045 &num_bound_mw);
2046 if (rc)
2047 return rc;
Ram Amranif1093942016-10-01 21:59:59 +03002048
Ram Amrani300c0d72017-02-20 22:43:32 +02002049 if (num_invalidated_mw != num_bound_mw) {
2050 DP_NOTICE(p_hwfn,
2051				  "number of invalidated memory windows differs from the number of bound ones\n");
2052 return -EINVAL;
2053 }
Ram Amranif1093942016-10-01 21:59:59 +03002054 }
2055
Ram Amranif1093942016-10-01 21:59:59 +03002056 return 0;
2057}
2058
Yuval Mintz0189efb2016-10-13 22:57:02 +03002059static int qed_rdma_query_qp(void *rdma_cxt,
2060 struct qed_rdma_qp *qp,
2061 struct qed_rdma_query_qp_out_params *out_params)
Ram Amranif1093942016-10-01 21:59:59 +03002062{
2063 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2064 int rc;
2065
2066 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2067
2068 /* The following fields are filled in from qp and not FW as they can't
2069 * be modified by FW
2070 */
2071 out_params->mtu = qp->mtu;
2072 out_params->dest_qp = qp->dest_qp;
2073 out_params->incoming_atomic_en = qp->incoming_atomic_en;
2074 out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
2075 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
2076 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
2077 out_params->dgid = qp->dgid;
2078 out_params->flow_label = qp->flow_label;
2079 out_params->hop_limit_ttl = qp->hop_limit_ttl;
2080 out_params->traffic_class_tos = qp->traffic_class_tos;
2081 out_params->timeout = qp->ack_timeout;
2082 out_params->rnr_retry = qp->rnr_retry_cnt;
2083 out_params->retry_cnt = qp->retry_cnt;
2084 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
2085 out_params->pkey_index = 0;
2086 out_params->max_rd_atomic = qp->max_rd_atomic_req;
2087 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
2088 out_params->sqd_async = qp->sqd_async;
2089
2090 rc = qed_roce_query_qp(p_hwfn, qp, out_params);
2091
2092 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
2093 return rc;
2094}
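
/* An illustrative caller sketch (hypothetical, not taken from qedr): query a
 * QP and log the state reported back through the out-params. Error handling
 * is trimmed to keep the example short.
 */
static void example_log_qp_state(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_qp *qp)
{
	struct qed_rdma_query_qp_out_params out_params;

	memset(&out_params, 0, sizeof(out_params));
	if (qed_rdma_query_qp(p_hwfn, qp, &out_params))
		return;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "state = %d draining = %d sq_psn = %08x rq_psn = %08x\n",
		   out_params.state, out_params.draining,
		   out_params.sq_psn, out_params.rq_psn);
}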
2095
Yuval Mintz0189efb2016-10-13 22:57:02 +03002096static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
Ram Amranif1093942016-10-01 21:59:59 +03002097{
2098 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2099 int rc = 0;
2100
2101 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2102
2103 rc = qed_roce_destroy_qp(p_hwfn, qp);
2104
2105 /* free qp params struct */
2106 kfree(qp);
2107
2108 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
2109 return rc;
2110}
2111
Yuval Mintz8c93bea2016-10-13 22:57:03 +03002112static struct qed_rdma_qp *
Ram Amranif1093942016-10-01 21:59:59 +03002113qed_rdma_create_qp(void *rdma_cxt,
2114 struct qed_rdma_create_qp_in_params *in_params,
2115 struct qed_rdma_create_qp_out_params *out_params)
2116{
2117 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2118 struct qed_rdma_qp *qp;
2119 u8 max_stats_queues;
2120 int rc;
2121
2122 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
2123 DP_ERR(p_hwfn->cdev,
2124		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
2125 rdma_cxt, in_params, out_params);
2126 return NULL;
2127 }
2128
2129 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2130 "qed rdma create qp called with qp_handle = %08x%08x\n",
2131 in_params->qp_handle_hi, in_params->qp_handle_lo);
2132
2133 /* Some sanity checks... */
2134 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
2135 if (in_params->stats_queue >= max_stats_queues) {
2136 DP_ERR(p_hwfn->cdev,
2137 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
2138 in_params->stats_queue, max_stats_queues);
2139 return NULL;
2140 }
2141
2142 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2143 if (!qp) {
2144 DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
2145 return NULL;
2146 }
2147
2148 rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
2149 qp->qpid = ((0xFF << 16) | qp->icid);
2150
2151 DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
2152
2153 if (rc) {
2154 kfree(qp);
2155 return NULL;
2156 }
2157
2158 qp->cur_state = QED_ROCE_QP_STATE_RESET;
2159 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
2160 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
2161 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
2162 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
2163 qp->use_srq = in_params->use_srq;
2164 qp->signal_all = in_params->signal_all;
2165 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
2166 qp->pd = in_params->pd;
2167 qp->dpi = in_params->dpi;
2168 qp->sq_cq_id = in_params->sq_cq_id;
2169 qp->sq_num_pages = in_params->sq_num_pages;
2170 qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
2171 qp->rq_cq_id = in_params->rq_cq_id;
2172 qp->rq_num_pages = in_params->rq_num_pages;
2173 qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
2174 qp->srq_id = in_params->srq_id;
2175 qp->req_offloaded = false;
2176 qp->resp_offloaded = false;
2177 qp->e2e_flow_control_en = qp->use_srq ? false : true;
2178 qp->stats_queue = in_params->stats_queue;
2179
2180 out_params->icid = qp->icid;
2181 out_params->qp_id = qp->qpid;
2182
2183 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
2184 return qp;
2185}
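
/* An illustrative sketch of the create flow from the upper layer's side
 * (hypothetical caller, not code from qedr). The identifiers filled in below
 * - PD, DPI, CQ ids and the SQ/RQ PBL addresses - are placeholders standing
 * in for resources obtained earlier through the rdma ops.
 */
static struct qed_rdma_qp *
example_create_qp(void *rdma_cxt,
		  struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_rdma_create_qp_in_params in_params;

	memset(&in_params, 0, sizeof(in_params));
	in_params.qp_handle_lo = 0x12345678;	/* opaque handle echoed in CQEs */
	in_params.qp_handle_hi = 0;
	in_params.pd = 0;			/* from rdma_alloc_pd */
	in_params.dpi = 0;			/* from rdma_add_user */
	in_params.sq_cq_id = 0;			/* from rdma_create_cq */
	in_params.rq_cq_id = 1;
	in_params.sq_num_pages = 1;		/* PBL describing the SQ ring */
	in_params.sq_pbl_ptr = 0;		/* physical address of the SQ PBL */
	in_params.rq_num_pages = 1;
	in_params.rq_pbl_ptr = 0;		/* physical address of the RQ PBL */
	in_params.use_srq = false;
	in_params.signal_all = true;
	in_params.stats_queue = 0;

	return qed_rdma_create_qp(rdma_cxt, &in_params, out_params);
}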
2186
2187static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2188 struct qed_rdma_qp *qp,
2189 enum qed_roce_qp_state prev_state,
2190 struct qed_rdma_modify_qp_in_params *params)
2191{
2192 u32 num_invalidated_mw = 0, num_bound_mw = 0;
2193 int rc = 0;
2194
2195 /* Perform additional operations according to the current state and the
2196 * next state
2197 */
2198 if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
2199 (prev_state == QED_ROCE_QP_STATE_RESET)) &&
2200 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
2201 /* Init->RTR or Reset->RTR */
2202 rc = qed_roce_sp_create_responder(p_hwfn, qp);
2203 return rc;
2204 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
2205 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2206		/* RTR->RTS */
2207 rc = qed_roce_sp_create_requester(p_hwfn, qp);
2208 if (rc)
2209 return rc;
2210
2211 /* Send modify responder ramrod */
2212 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2213 params->modify_flags);
2214 return rc;
2215 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2216 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2217 /* RTS->RTS */
2218 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2219 params->modify_flags);
2220 if (rc)
2221 return rc;
2222
2223 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2224 params->modify_flags);
2225 return rc;
2226 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2227 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2228 /* RTS->SQD */
2229 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
2230 params->modify_flags);
2231 return rc;
2232 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2233 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2234 /* SQD->SQD */
2235 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2236 params->modify_flags);
2237 if (rc)
2238 return rc;
2239
2240 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2241 params->modify_flags);
2242 return rc;
2243 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2244 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2245 /* SQD->RTS */
2246 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2247 params->modify_flags);
2248 if (rc)
2249 return rc;
2250
2251 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2252 params->modify_flags);
2253
2254 return rc;
Ram Amraniba0154e2017-04-30 11:49:06 +03002255 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
Ram Amranif1093942016-10-01 21:59:59 +03002256 /* ->ERR */
2257 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
2258 params->modify_flags);
2259 if (rc)
2260 return rc;
2261
2262 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
2263 params->modify_flags);
2264 return rc;
2265 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
2266 /* Any state -> RESET */
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002267 u32 cq_prod;
Ram Amranif1093942016-10-01 21:59:59 +03002268
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002269 /* Send destroy responder ramrod */
2270 rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
2271 qp,
2272 &num_invalidated_mw,
2273 &cq_prod);
2274
Ram Amranif1093942016-10-01 21:59:59 +03002275 if (rc)
2276 return rc;
2277
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002278 qp->cq_prod = cq_prod;
2279
Ram Amranif1093942016-10-01 21:59:59 +03002280 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2281 &num_bound_mw);
2282
2283 if (num_invalidated_mw != num_bound_mw) {
2284 DP_NOTICE(p_hwfn,
2285				  "number of invalidated memory windows differs from the number of bound ones\n");
2286 return -EINVAL;
2287 }
2288 } else {
2289		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2290			   "State transition does not require a ramrod\n");
2290 }
2291
2292 return rc;
2293}
2294
Yuval Mintz0189efb2016-10-13 22:57:02 +03002295static int qed_rdma_modify_qp(void *rdma_cxt,
2296 struct qed_rdma_qp *qp,
2297 struct qed_rdma_modify_qp_in_params *params)
Ram Amranif1093942016-10-01 21:59:59 +03002298{
2299 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2300 enum qed_roce_qp_state prev_state;
2301 int rc = 0;
2302
2303 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
2304 qp->icid, params->new_state);
2305
2311 if (GET_FIELD(params->modify_flags,
2312 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
2313 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
2314 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
2315 qp->incoming_atomic_en = params->incoming_atomic_en;
2316 }
2317
2318 /* Update QP structure with the updated values */
2319 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
2320 qp->roce_mode = params->roce_mode;
2321 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
2322 qp->pkey = params->pkey;
2323 if (GET_FIELD(params->modify_flags,
2324 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
2325 qp->e2e_flow_control_en = params->e2e_flow_control_en;
2326 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
2327 qp->dest_qp = params->dest_qp;
2328 if (GET_FIELD(params->modify_flags,
2329 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
2330 /* Indicates that the following parameters have changed:
2331 * Traffic class, flow label, hop limit, source GID,
2332 * destination GID, loopback indicator
2333 */
2334 qp->traffic_class_tos = params->traffic_class_tos;
2335 qp->flow_label = params->flow_label;
2336 qp->hop_limit_ttl = params->hop_limit_ttl;
2337
2338 qp->sgid = params->sgid;
2339 qp->dgid = params->dgid;
2340 qp->udp_src_port = 0;
2341 qp->vlan_id = params->vlan_id;
2342 qp->mtu = params->mtu;
2343 qp->lb_indication = params->lb_indication;
2344 memcpy((u8 *)&qp->remote_mac_addr[0],
2345 (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
2346 if (params->use_local_mac) {
2347 memcpy((u8 *)&qp->local_mac_addr[0],
2348 (u8 *)&params->local_mac_addr[0], ETH_ALEN);
2349 } else {
2350 memcpy((u8 *)&qp->local_mac_addr[0],
2351 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
2352 }
2353 }
2354 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
2355 qp->rq_psn = params->rq_psn;
2356 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
2357 qp->sq_psn = params->sq_psn;
2358 if (GET_FIELD(params->modify_flags,
2359 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
2360 qp->max_rd_atomic_req = params->max_rd_atomic_req;
2361 if (GET_FIELD(params->modify_flags,
2362 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
2363 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
2364 if (GET_FIELD(params->modify_flags,
2365 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
2366 qp->ack_timeout = params->ack_timeout;
2367 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
2368 qp->retry_cnt = params->retry_cnt;
2369 if (GET_FIELD(params->modify_flags,
2370 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
2371 qp->rnr_retry_cnt = params->rnr_retry_cnt;
2372 if (GET_FIELD(params->modify_flags,
2373 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
2374 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
2375
2376 qp->sqd_async = params->sqd_async;
2377
2378 prev_state = qp->cur_state;
2379 if (GET_FIELD(params->modify_flags,
2380 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
2381 qp->cur_state = params->new_state;
2382 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
2383 qp->cur_state);
2384 }
2385
2386 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
2387
2388 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
2389 return rc;
2390}
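
/* An illustrative sketch (hypothetical caller, not from qedr) of driving the
 * RTR->RTS transition handled by qed_roce_modify_qp() above; this is the
 * step that offloads the requester. A real caller would also set the SQ PSN,
 * address vector, timeouts, etc. through the corresponding modify_flags
 * bits.
 */
static int example_qp_to_rts(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_rdma_modify_qp_in_params params;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
	params.new_state = QED_ROCE_QP_STATE_RTS;

	return qed_rdma_modify_qp(rdma_cxt, qp, &params);
}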
2391
Yuval Mintz0189efb2016-10-13 22:57:02 +03002392static int
2393qed_rdma_register_tid(void *rdma_cxt,
2394 struct qed_rdma_register_tid_in_params *params)
Ram Amraniee8eaea2016-10-01 22:00:00 +03002395{
2396 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2397 struct rdma_register_tid_ramrod_data *p_ramrod;
2398 struct qed_sp_init_data init_data;
2399 struct qed_spq_entry *p_ent;
2400 enum rdma_tid_type tid_type;
2401 u8 fw_return_code;
2402 int rc;
2403
2404 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
2405
2406 /* Get SPQ entry */
2407 memset(&init_data, 0, sizeof(init_data));
2408 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2409 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2410
2411 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
2412 p_hwfn->p_rdma_info->proto, &init_data);
2413 if (rc) {
2414 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2415 return rc;
2416 }
2417
2418 if (p_hwfn->p_rdma_info->last_tid < params->itid)
2419 p_hwfn->p_rdma_info->last_tid = params->itid;
2420
2421 p_ramrod = &p_ent->ramrod.rdma_register_tid;
2422
2423 p_ramrod->flags = 0;
2424 SET_FIELD(p_ramrod->flags,
2425 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
2426 params->pbl_two_level);
2427
2428 SET_FIELD(p_ramrod->flags,
2429 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
2430
2431 SET_FIELD(p_ramrod->flags,
2432 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
2433
2434 /* Don't initialize D/C field, as it may override other bits. */
2435 if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
2436 SET_FIELD(p_ramrod->flags,
2437 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
2438 params->page_size_log - 12);
2439
2440 SET_FIELD(p_ramrod->flags,
Ram Amraniee8eaea2016-10-01 22:00:00 +03002441 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
2442 params->remote_read);
2443
2444 SET_FIELD(p_ramrod->flags,
2445 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
2446 params->remote_write);
2447
2448 SET_FIELD(p_ramrod->flags,
2449 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
2450 params->remote_atomic);
2451
2452 SET_FIELD(p_ramrod->flags,
2453 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
2454 params->local_write);
2455
2456 SET_FIELD(p_ramrod->flags,
2457 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
2458
2459 SET_FIELD(p_ramrod->flags,
2460 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
2461 params->mw_bind);
2462
2463 SET_FIELD(p_ramrod->flags1,
2464 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
2465 params->pbl_page_size_log - 12);
2466
2467 SET_FIELD(p_ramrod->flags2,
2468 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
2469
2470 switch (params->tid_type) {
2471 case QED_RDMA_TID_REGISTERED_MR:
2472 tid_type = RDMA_TID_REGISTERED_MR;
2473 break;
2474 case QED_RDMA_TID_FMR:
2475 tid_type = RDMA_TID_FMR;
2476 break;
2477 case QED_RDMA_TID_MW_TYPE1:
2478 tid_type = RDMA_TID_MW_TYPE1;
2479 break;
2480 case QED_RDMA_TID_MW_TYPE2A:
2481 tid_type = RDMA_TID_MW_TYPE2A;
2482 break;
2483 default:
2484 rc = -EINVAL;
2485 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2486 return rc;
2487 }
2488 SET_FIELD(p_ramrod->flags1,
2489 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
2490
2491 p_ramrod->itid = cpu_to_le32(params->itid);
2492 p_ramrod->key = params->key;
2493 p_ramrod->pd = cpu_to_le16(params->pd);
2494 p_ramrod->length_hi = (u8)(params->length >> 32);
2495 p_ramrod->length_lo = DMA_LO_LE(params->length);
2496 if (params->zbva) {
2497 /* Lower 32 bits of the registered MR address.
2498 * In case of zero based MR, will hold FBO
2499 */
2500 p_ramrod->va.hi = 0;
2501 p_ramrod->va.lo = cpu_to_le32(params->fbo);
2502 } else {
2503 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
2504 }
2505 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
2506
2507 /* DIF */
2508 if (params->dif_enabled) {
2509 SET_FIELD(p_ramrod->flags2,
2510 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
2511 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
2512 params->dif_error_addr);
2513 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
2514 }
2515
2516 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
Ram Amrani10536192017-04-30 11:49:07 +03002517 if (rc)
2518 return rc;
Ram Amraniee8eaea2016-10-01 22:00:00 +03002519
2520 if (fw_return_code != RDMA_RETURN_OK) {
2521 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2522 return -EINVAL;
2523 }
2524
2525 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
2526 return rc;
2527}
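
/* An illustrative sketch (hypothetical values, not from qedr) of registering
 * a plain MR through the ramrod above: a single-level PBL, 4K pages and
 * local access only.
 */
static int example_register_mr(void *rdma_cxt, u32 itid, u16 pd,
			       u64 vaddr, u64 length, u64 pbl_ptr)
{
	struct qed_rdma_register_tid_in_params params;

	memset(&params, 0, sizeof(params));
	params.itid = itid;			/* from rdma_alloc_tid */
	params.tid_type = QED_RDMA_TID_REGISTERED_MR;
	params.pd = pd;
	params.vaddr = vaddr;
	params.length = length;
	params.pbl_ptr = pbl_ptr;		/* physical address of the PBL */
	params.pbl_two_level = false;
	params.page_size_log = 12;		/* 4K MR pages */
	params.pbl_page_size_log = 12;		/* 4K PBL pages */
	params.local_read = true;
	params.local_write = true;

	return qed_rdma_register_tid(rdma_cxt, &params);
}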
2528
Yuval Mintz0189efb2016-10-13 22:57:02 +03002529static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
Ram Amraniee8eaea2016-10-01 22:00:00 +03002530{
2531 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2532 struct rdma_deregister_tid_ramrod_data *p_ramrod;
2533 struct qed_sp_init_data init_data;
2534 struct qed_spq_entry *p_ent;
2535 struct qed_ptt *p_ptt;
2536 u8 fw_return_code;
2537 int rc;
2538
2539 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
2540
2541 /* Get SPQ entry */
2542 memset(&init_data, 0, sizeof(init_data));
2543 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2544 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2545
2546 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
2547 p_hwfn->p_rdma_info->proto, &init_data);
2548 if (rc) {
2549 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2550 return rc;
2551 }
2552
2553 p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
2554 p_ramrod->itid = cpu_to_le32(itid);
2555
2556 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2557 if (rc) {
2558 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2559 return rc;
2560 }
2561
2562 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
2563 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2564 return -EINVAL;
2565 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
2566		/* Bit indicating that the TID is in use and a NIG drain is
2567 * required before sending the ramrod again
2568 */
2569 p_ptt = qed_ptt_acquire(p_hwfn);
2570 if (!p_ptt) {
2571 rc = -EBUSY;
2572 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2573 "Failed to acquire PTT\n");
2574 return rc;
2575 }
2576
2577 rc = qed_mcp_drain(p_hwfn, p_ptt);
2578 if (rc) {
2579 qed_ptt_release(p_hwfn, p_ptt);
2580 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2581 "Drain failed\n");
2582 return rc;
2583 }
2584
2585 qed_ptt_release(p_hwfn, p_ptt);
2586
2587 /* Resend the ramrod */
2588 rc = qed_sp_init_request(p_hwfn, &p_ent,
2589 RDMA_RAMROD_DEREGISTER_MR,
2590 p_hwfn->p_rdma_info->proto,
2591 &init_data);
2592 if (rc) {
2593 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2594 "Failed to init sp-element\n");
2595 return rc;
2596 }
2597
2598 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2599 if (rc) {
2600 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2601 "Ramrod failed\n");
2602 return rc;
2603 }
2604
2605 if (fw_return_code != RDMA_RETURN_OK) {
2606 DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
2607 fw_return_code);
2608 return rc;
2609 }
2610 }
2611
2612 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
2613 return rc;
2614}
2615
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002616static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
2617{
2618 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
2619 u32 start_cid, cid, xcid;
2620
2621	/* An even icid belongs to a responder while an odd icid belongs to a
2622	 * requester. The 'cid' received as an input can be either. We calculate
2623	 * the "partner" icid and call it xcid. Only if both are free can the
2624	 * "cid" map be cleared.
2625 */
2626 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
2627 cid = icid - start_cid;
2628 xcid = cid ^ 1;
2629
2630 spin_lock_bh(&p_rdma_info->lock);
2631
2632 qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
2633 if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
2634 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
2635 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
2636 }
2637
2638 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2639}
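
/* A small illustration of the pairing described in the comment above: the
 * two icids of a QP differ only in their least significant bit (responder on
 * the even icid, requester on the odd one right after it, matching the
 * 'qp->icid' / 'qp->icid + 1' usage earlier in this file), so XOR with 1
 * always yields the partner. The helper is purely illustrative.
 */
static u32 example_partner_cid(u32 cid)
{
	return cid ^ 1;	/* 6 <-> 7, 8 <-> 9, ... */
}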
2640
Ram Amrani51ff1722016-10-01 21:59:57 +03002641static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
2642{
2643 return QED_LEADING_HWFN(cdev);
2644}
2645
Mintz, Yuval9331dad2017-06-20 16:00:02 +03002646static bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
2647{
2648 bool result;
2649
2650 /* if rdma info has not been allocated, naturally there are no qps */
2651 if (!p_hwfn->p_rdma_info)
2652 return false;
2653
2654 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2655 if (!p_hwfn->p_rdma_info->cid_map.bitmap)
2656 result = false;
2657 else
2658 result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
2659 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2660 return result;
2661}
2662
Ram Amrani51ff1722016-10-01 21:59:57 +03002663static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2664{
2665 u32 val;
2666
2667 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
2668
2669 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
2670 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
2671 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
2672 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
2673}
2674
Mintz, Yuval9331dad2017-06-20 16:00:02 +03002675void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2676{
2677 u8 val;
2678
2679	/* If any QPs are already active, we want to disable DPM, since their
2680	 * context information was built before the latest DCBx update.
2681	 * Otherwise enable it.
2682 */
2683 val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
2684 p_hwfn->dcbx_no_edpm = (u8)val;
2685
2686 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2687}
2688
Ram Amrani51ff1722016-10-01 21:59:57 +03002689void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2690{
2691 p_hwfn->db_bar_no_edpm = true;
2692
2693 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2694}
2695
Yuval Mintz0189efb2016-10-13 22:57:02 +03002696static int qed_rdma_start(void *rdma_cxt,
2697 struct qed_rdma_start_in_params *params)
Ram Amrani51ff1722016-10-01 21:59:57 +03002698{
2699 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2700 struct qed_ptt *p_ptt;
2701 int rc = -EBUSY;
2702
2703 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2704 "desired_cnq = %08x\n", params->desired_cnq);
2705
2706 p_ptt = qed_ptt_acquire(p_hwfn);
2707 if (!p_ptt)
2708 goto err;
2709
2710 rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
2711 if (rc)
2712 goto err1;
2713
2714 rc = qed_rdma_setup(p_hwfn, p_ptt, params);
2715 if (rc)
2716 goto err2;
2717
2718 qed_ptt_release(p_hwfn, p_ptt);
2719
2720 return rc;
2721
2722err2:
2723 qed_rdma_free(p_hwfn);
2724err1:
2725 qed_ptt_release(p_hwfn, p_ptt);
2726err:
2727 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
2728 return rc;
2729}
2730
2731static int qed_rdma_init(struct qed_dev *cdev,
2732 struct qed_rdma_start_in_params *params)
2733{
2734 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2735}
2736
Yuval Mintz0189efb2016-10-13 22:57:02 +03002737static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
Ram Amrani51ff1722016-10-01 21:59:57 +03002738{
2739 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2740
2741 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
2742
2743 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2744 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
2745 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2746}
2747
Ram Amraniabd49672016-10-01 22:00:01 +03002748static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
2749 u8 *old_mac_address,
2750 u8 *new_mac_address)
2751{
Michal Kalderon0518c122017-06-09 17:13:22 +03002752 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
Ram Amraniabd49672016-10-01 22:00:01 +03002753 struct qed_ptt *p_ptt;
2754 int rc = 0;
2755
Michal Kalderon0518c122017-06-09 17:13:22 +03002756 p_ptt = qed_ptt_acquire(p_hwfn);
Ram Amraniabd49672016-10-01 22:00:01 +03002757 if (!p_ptt) {
2758 DP_ERR(cdev,
2759 "qed roce ll2 mac filter set: failed to acquire PTT\n");
2760 return -EINVAL;
2761 }
2762
Ram Amraniabd49672016-10-01 22:00:01 +03002763 if (old_mac_address)
Michal Kalderon0518c122017-06-09 17:13:22 +03002764 qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
Ram Amraniabd49672016-10-01 22:00:01 +03002765 if (new_mac_address)
Michal Kalderon0518c122017-06-09 17:13:22 +03002766 rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
Ram Amraniabd49672016-10-01 22:00:01 +03002767
Michal Kalderon0518c122017-06-09 17:13:22 +03002768 qed_ptt_release(p_hwfn, p_ptt);
Ram Amraniabd49672016-10-01 22:00:01 +03002769
2770 if (rc)
2771 DP_ERR(cdev,
Michal Kalderon0518c122017-06-09 17:13:22 +03002772 "qed roce ll2 mac filter set: failed to add MAC filter\n");
Ram Amraniabd49672016-10-01 22:00:01 +03002773
2774 return rc;
2775}
2776
Ram Amrani51ff1722016-10-01 21:59:57 +03002777static const struct qed_rdma_ops qed_rdma_ops_pass = {
2778 .common = &qed_common_ops_pass,
2779 .fill_dev_info = &qed_fill_rdma_dev_info,
2780 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
2781 .rdma_init = &qed_rdma_init,
2782 .rdma_add_user = &qed_rdma_add_user,
2783 .rdma_remove_user = &qed_rdma_remove_user,
2784 .rdma_stop = &qed_rdma_stop,
Ram Amranic295f862016-10-01 21:59:58 +03002785 .rdma_query_port = &qed_rdma_query_port,
Ram Amrani51ff1722016-10-01 21:59:57 +03002786 .rdma_query_device = &qed_rdma_query_device,
2787 .rdma_get_start_sb = &qed_rdma_get_sb_start,
2788 .rdma_get_rdma_int = &qed_rdma_get_int,
2789 .rdma_set_rdma_int = &qed_rdma_set_int,
2790 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
2791 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
Ram Amranic295f862016-10-01 21:59:58 +03002792 .rdma_alloc_pd = &qed_rdma_alloc_pd,
2793 .rdma_dealloc_pd = &qed_rdma_free_pd,
2794 .rdma_create_cq = &qed_rdma_create_cq,
2795 .rdma_destroy_cq = &qed_rdma_destroy_cq,
Ram Amranif1093942016-10-01 21:59:59 +03002796 .rdma_create_qp = &qed_rdma_create_qp,
2797 .rdma_modify_qp = &qed_rdma_modify_qp,
2798 .rdma_query_qp = &qed_rdma_query_qp,
2799 .rdma_destroy_qp = &qed_rdma_destroy_qp,
Ram Amraniee8eaea2016-10-01 22:00:00 +03002800 .rdma_alloc_tid = &qed_rdma_alloc_tid,
2801 .rdma_free_tid = &qed_rdma_free_tid,
2802 .rdma_register_tid = &qed_rdma_register_tid,
2803 .rdma_deregister_tid = &qed_rdma_deregister_tid,
Michal Kalderon0518c122017-06-09 17:13:22 +03002804 .ll2_acquire_connection = &qed_ll2_acquire_connection,
2805 .ll2_establish_connection = &qed_ll2_establish_connection,
2806 .ll2_terminate_connection = &qed_ll2_terminate_connection,
2807 .ll2_release_connection = &qed_ll2_release_connection,
2808 .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
2809 .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
2810 .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
2811 .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
2812 .ll2_get_stats = &qed_ll2_get_stats,
Ram Amrani51ff1722016-10-01 21:59:57 +03002813};
2814
Arnd Bergmannd4e99132016-10-10 13:59:16 +02002815const struct qed_rdma_ops *qed_get_rdma_ops(void)
Ram Amrani51ff1722016-10-01 21:59:57 +03002816{
2817 return &qed_rdma_ops_pass;
2818}
2819EXPORT_SYMBOL(qed_get_rdma_ops);
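
/* An illustrative consumer sketch (hypothetical, not taken from the qedr
 * driver): an upper-layer module fetches the exported ops table and
 * retrieves the rdma context it must pass back into every rdma_* callback.
 */
static void *example_get_roce_ctx(struct qed_dev *cdev)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();

	/* Internally this is the leading hwfn of the device */
	return ops->rdma_get_rdma_ctx(cdev);
}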