 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include <asm/byteorder.h>
34#include <linux/bitops.h>
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37#include <linux/errno.h>
38#include <linux/io.h>
39#include <linux/kernel.h>
40#include <linux/list.h>
41#include <linux/module.h>
42#include <linux/mutex.h>
43#include <linux/pci.h>
44#include <linux/slab.h>
45#include <linux/spinlock.h>
46#include <linux/string.h>
47#include "qed.h"
48#include "qed_cxt.h"
49#include "qed_hsi.h"
50#include "qed_hw.h"
51#include "qed_init_ops.h"
52#include "qed_int.h"
53#include "qed_ll2.h"
54#include "qed_mcp.h"
55#include "qed_reg_addr.h"
56#include "qed_roce.h"
57#include <linux/qed/qed_roce_if.h>
58#include "qed_sp.h"
59
60static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
61
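/* RoCE async-event dispatch: DESTROY_QP_DONE completions carry the cid of
 * the destroyed QP and are handed to qed_roce_free_real_icid(); every other
 * event code is forwarded unchanged to the affiliated_event callback that
 * the upper RDMA driver registered via qed_rdma_init_events().
 */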
62static int
63qed_roce_async_event(struct qed_hwfn *p_hwfn,
64 u8 fw_event_code,
65 u16 echo, union event_ring_data *data, u8 fw_return_code)
66{
67 if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
68 u16 icid =
69 (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
70
71 /* icid release in this async event can occur only if the icid
72 * was offloaded to the FW. In case it wasn't offloaded this is
73 * handled in qed_roce_sp_destroy_qp.
74 */
75 qed_roce_free_real_icid(p_hwfn, icid);
76 } else {
77 struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
78
79 events->affiliated_event(p_hwfn->p_rdma_info->events.context,
80 fw_event_code,
81 (void *)&data->rdma_data.async_handle);
82 }
83
84 return 0;
85}
86
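/* qed_bmap helpers: a qed_bmap is a fixed-size id allocator backed by a
 * bitmap. qed_rdma_bmap_alloc() sizes and names it, qed_rdma_bmap_alloc_id()
 * hands out the lowest free id, qed_bmap_set_id()/qed_bmap_release_id() mark
 * a specific id as used/free, and qed_bmap_test_id()/qed_bmap_is_empty()
 * query it. Callers serialize access through p_rdma_info->lock.
 */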
87static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
88 struct qed_bmap *bmap, u32 max_count, char *name)
89{
90 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
91
92 bmap->max_count = max_count;
93
94 bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
95 GFP_KERNEL);
96 if (!bmap->bitmap)
97 return -ENOMEM;
98
99 snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
100
101 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
102 return 0;
103}
104
105static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
106 struct qed_bmap *bmap, u32 *id_num)
107{
108 *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
109 if (*id_num >= bmap->max_count)
110 return -EINVAL;
111
112 __set_bit(*id_num, bmap->bitmap);
113
114 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
115 bmap->name, *id_num);
116
117 return 0;
118}
119
120static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
121 struct qed_bmap *bmap, u32 id_num)
122{
123 if (id_num >= bmap->max_count)
124 return;
125
126 __set_bit(id_num, bmap->bitmap);
127}
128
129static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
130 struct qed_bmap *bmap, u32 id_num)
131{
132 bool b_acquired;
133
134 if (id_num >= bmap->max_count)
135 return;
136
137 b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
138 if (!b_acquired) {
139 DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
140 bmap->name, id_num);
141 return;
142 }
143
144 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
145 bmap->name, id_num);
146}
147
148static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
149 struct qed_bmap *bmap, u32 id_num)
150{
151 if (id_num >= bmap->max_count)
152 return -1;
153
154 return test_bit(id_num, bmap->bitmap);
155}
156
157static bool qed_bmap_is_empty(struct qed_bmap *bmap)
158{
159 return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
160}
161
162static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
163{
164 /* First sb id for RoCE is after all the l2 sb */
165 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
166}
167
168static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
169 struct qed_ptt *p_ptt,
170 struct qed_rdma_start_in_params *params)
171{
172 struct qed_rdma_info *p_rdma_info;
173 u32 num_cons, num_tasks;
174 int rc = -ENOMEM;
175
176 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
177
178 /* Allocate a struct with current pf rdma info */
179 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
180 if (!p_rdma_info)
181 return rc;
182
183 p_hwfn->p_rdma_info = p_rdma_info;
184 p_rdma_info->proto = PROTOCOLID_ROCE;
185
186 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
187 NULL);
188
189 p_rdma_info->num_qps = num_cons / 2;
190
191 num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
192
193 /* Each MR uses a single task */
194 p_rdma_info->num_mrs = num_tasks;
195
196 /* Queue zone lines are shared between RoCE and L2 in such a way that
197 * they can be used by each without obstructing the other.
198 */
199 p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
200 p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
201
202 /* Allocate a struct with device params and fill it */
203 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
204 if (!p_rdma_info->dev)
205 goto free_rdma_info;
206
207 /* Allocate a struct with port params and fill it */
208 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
209 if (!p_rdma_info->port)
210 goto free_rdma_dev;
211
 212 /* Allocate bitmap for PDs */
213 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
214 "PD");
215 if (rc) {
216 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
217 "Failed to allocate pd_map, rc = %d\n",
218 rc);
219 goto free_rdma_port;
220 }
221
222 /* Allocate DPI bitmap */
223 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
224 p_hwfn->dpi_count, "DPI");
225 if (rc) {
226 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
227 "Failed to allocate DPI bitmap, rc = %d\n", rc);
228 goto free_pd_map;
229 }
230
 231 /* Allocate bitmap for CQs. The maximum number of CQs is bounded to
232 * twice the number of QPs.
233 */
234 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
235 p_rdma_info->num_qps * 2, "CQ");
236 if (rc) {
237 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
238 "Failed to allocate cq bitmap, rc = %d\n", rc);
239 goto free_dpi_map;
240 }
241
 242 /* Allocate bitmap for the toggle bit of CQ icids.
 243 * We toggle the bit every time we create or resize a CQ for a given icid.
244 * The maximum number of CQs is bounded to twice the number of QPs.
245 */
246 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
247 p_rdma_info->num_qps * 2, "Toggle");
248 if (rc) {
249 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
250 "Failed to allocate toogle bits, rc = %d\n", rc);
251 goto free_cq_map;
252 }
253
254 /* Allocate bitmap for itids */
255 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
256 p_rdma_info->num_mrs, "MR");
257 if (rc) {
258 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
259 "Failed to allocate itids bitmaps, rc = %d\n", rc);
260 goto free_toggle_map;
261 }
262
263 /* Allocate bitmap for cids used for qps. */
264 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
265 "CID");
266 if (rc) {
267 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
268 "Failed to allocate cid bitmap, rc = %d\n", rc);
269 goto free_tid_map;
270 }
271
272 /* Allocate bitmap for cids used for responders/requesters. */
273 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
274 "REAL_CID");
275 if (rc) {
276 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
277 "Failed to allocate real cid bitmap, rc = %d\n", rc);
278 goto free_cid_map;
279 }
280 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
281 return 0;
282
283free_cid_map:
284 kfree(p_rdma_info->cid_map.bitmap);
285free_tid_map:
286 kfree(p_rdma_info->tid_map.bitmap);
287free_toggle_map:
288 kfree(p_rdma_info->toggle_bits.bitmap);
289free_cq_map:
290 kfree(p_rdma_info->cq_map.bitmap);
291free_dpi_map:
292 kfree(p_rdma_info->dpi_map.bitmap);
293free_pd_map:
294 kfree(p_rdma_info->pd_map.bitmap);
295free_rdma_port:
296 kfree(p_rdma_info->port);
297free_rdma_dev:
298 kfree(p_rdma_info->dev);
299free_rdma_info:
300 kfree(p_rdma_info);
301
302 return rc;
303}
304
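/* Free a qed_bmap. When @check is set and the bitmap is not empty, the
 * remaining set bits are dumped (512 bits, i.e. eight u64 words, per printed
 * line) so that leaked ids can be identified before the memory is released.
 */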
305static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
306 struct qed_bmap *bmap, bool check)
307{
308 int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
309 int last_line = bmap->max_count / (64 * 8);
310 int last_item = last_line * 8 +
311 DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
312 u64 *pmap = (u64 *)bmap->bitmap;
313 int line, item, offset;
314 u8 str_last_line[200] = { 0 };
315
316 if (!weight || !check)
317 goto end;
318
319 DP_NOTICE(p_hwfn,
320 "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
321 bmap->name, bmap->max_count, weight);
322
323 /* print aligned non-zero lines, if any */
324 for (item = 0, line = 0; line < last_line; line++, item += 8)
325 if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
326 DP_NOTICE(p_hwfn,
327 "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
328 line,
329 pmap[item],
330 pmap[item + 1],
331 pmap[item + 2],
332 pmap[item + 3],
333 pmap[item + 4],
334 pmap[item + 5],
335 pmap[item + 6], pmap[item + 7]);
336
337 /* print last unaligned non-zero line, if any */
338 if ((bmap->max_count % (64 * 8)) &&
339 (bitmap_weight((unsigned long *)&pmap[item],
340 bmap->max_count - item * 64))) {
341 offset = sprintf(str_last_line, "line 0x%04x: ", line);
342 for (; item < last_item; item++)
343 offset += sprintf(str_last_line + offset,
344 "0x%016llx ", pmap[item]);
345 DP_NOTICE(p_hwfn, "%s\n", str_last_line);
346 }
347
348end:
349 kfree(bmap->bitmap);
350 bmap->bitmap = NULL;
351}
352
353static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
354{
355 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
356
357 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
358 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
359 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
360 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
361 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
362 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
363
364 kfree(p_rdma_info->port);
365 kfree(p_rdma_info->dev);
366
367 kfree(p_rdma_info);
368}
369
370static void qed_rdma_free(struct qed_hwfn *p_hwfn)
371{
372 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
373
374 qed_rdma_resc_free(p_hwfn);
375}
376
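/* Build an EUI-64 style GUID from the port MAC address: flip the
 * locally-administered bit of the first octet and splice 0xff, 0xfe into the
 * middle, the same construction used for IPv6 link-local interface ids.
 * For example (illustrative MAC, not taken from this driver):
 * 00:0e:1e:aa:bb:cc becomes the GUID 02:0e:1e:ff:fe:aa:bb:cc.
 */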
377static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
378{
379 guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
380 guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
381 guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
382 guid[3] = 0xff;
383 guid[4] = 0xfe;
384 guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
385 guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
386 guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
387}
388
389static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
390 struct qed_rdma_start_in_params *params)
391{
392 struct qed_rdma_events *events;
393
394 events = &p_hwfn->p_rdma_info->events;
395
396 events->unaffiliated_event = params->events->unaffiliated_event;
397 events->affiliated_event = params->events->affiliated_event;
398 events->context = params->events->context;
399}
400
401static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
402 struct qed_rdma_start_in_params *params)
403{
404 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
405 struct qed_dev *cdev = p_hwfn->cdev;
406 u32 pci_status_control;
407 u32 num_qps;
408
409 /* Vendor specific information */
410 dev->vendor_id = cdev->vendor_id;
411 dev->vendor_part_id = cdev->device_id;
412 dev->hw_ver = 0;
413 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
414 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
415
416 qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
417 dev->node_guid = dev->sys_image_guid;
418
419 dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
420 RDMA_MAX_SGE_PER_RQ_WQE);
421
422 if (cdev->rdma_max_sge)
423 dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
424
425 dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
426
427 dev->max_inline = (cdev->rdma_max_inline) ?
428 min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
429 dev->max_inline;
430
431 dev->max_wqe = QED_RDMA_MAX_WQE;
432 dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
433
434 /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
435 * it is up-aligned to 16 and then to ILT page size within qed cxt.
 436 * This is OK in terms of ILT, but we don't want to configure the FW
 437 * beyond its capabilities.
438 */
439 num_qps = ROCE_MAX_QPS;
440 num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
441 dev->max_qp = num_qps;
442
 443 /* CQs use the same icids that QPs use, hence they are limited by the
444 * number of icids. There are two icids per QP.
445 */
446 dev->max_cq = num_qps * 2;
447
448 /* The number of mrs is smaller by 1 since the first is reserved */
449 dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
450 dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
451
 452 /* The maximum number of CQEs supported per CQ.
 453 * The maximum is reached with a two-level PBL, where
 454 * 8 is the PBL pointer size in bytes and
 455 * 32 is the size of a CQ element in bytes
456 */
457 if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
458 dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
459 else
460 dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
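	/* Worked example, assuming 4 KiB PBL pages (an assumption, the page
	 * size is not spelled out here): one page of pointers holds
	 * 4096 / 8 = 512 entries and each pointed-to page holds
	 * 4096 / 32 = 128 CQEs, so a two-level PBL rooted in a single page
	 * spans 512 * 128 = 65536 CQEs; the QED_RDMA_MAX_CQE_* constants
	 * above cap what is actually reported per CQ mode.
	 */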
461
462 dev->max_mw = 0;
463 dev->max_fmr = QED_RDMA_MAX_FMR;
464 dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
465 dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
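	/* With 4 KiB pages the two lines above evaluate to
	 * (4096 / 8) * (4096 / 8) = 262144 PBL entries and a maximum region
	 * size of 262144 * 4 KiB = 1 GiB (illustrative for PAGE_SIZE == 4096).
	 */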
466 dev->max_pkey = QED_RDMA_MAX_P_KEY;
467
468 dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
469 (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
470 dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
471 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
472 dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
473 p_hwfn->p_rdma_info->num_qps;
474 dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
475 dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
476 dev->max_pd = RDMA_MAX_PDS;
477 dev->max_ah = p_hwfn->p_rdma_info->num_qps;
478 dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
479
 480 /* Set capabilities */
481 dev->dev_caps = 0;
482 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
483 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
484 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
485 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
486 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
487 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
488 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
489 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
490
491 /* Check atomic operations support in PCI configuration space. */
492 pci_read_config_dword(cdev->pdev,
493 cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
494 &pci_status_control);
495
496 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
497 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
498}
499
500static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
501{
502 struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
503 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
504
505 port->port_state = p_hwfn->mcp_info->link_output.link_up ?
506 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
507
508 port->max_msg_size = min_t(u64,
509 (dev->max_mr_mw_fmr_size *
510 p_hwfn->cdev->rdma_max_sge),
511 BIT(31));
512
513 port->pkey_bad_counter = 0;
514}
515
516static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
517{
518 u32 ll2_ethertype_en;
519
520 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
521 p_hwfn->b_rdma_enabled_in_prs = false;
522
523 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
524
525 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
526
 527 /* We delay writing to this reg until the first cid is allocated. See
 528 * the qed_cxt_dynamic_ilt_alloc function for more details.
529 */
530 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
531 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
532 (ll2_ethertype_en | 0x01));
533
534 if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
535 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
536 return -EINVAL;
537 }
538
539 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
540 return 0;
541}
542
543static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
544 struct qed_rdma_start_in_params *params,
545 struct qed_ptt *p_ptt)
546{
547 struct rdma_init_func_ramrod_data *p_ramrod;
548 struct qed_rdma_cnq_params *p_cnq_pbl_list;
549 struct rdma_init_func_hdr *p_params_header;
550 struct rdma_cnq_params *p_cnq_params;
551 struct qed_sp_init_data init_data;
552 struct qed_spq_entry *p_ent;
553 u32 cnq_id, sb_id;
554 u16 igu_sb_id;
555 int rc;
556
557 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
558
559 /* Save the number of cnqs for the function close ramrod */
560 p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
561
562 /* Get SPQ entry */
563 memset(&init_data, 0, sizeof(init_data));
564 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
565 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
566
567 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
568 p_hwfn->p_rdma_info->proto, &init_data);
569 if (rc)
570 return rc;
571
572 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
573
574 p_params_header = &p_ramrod->params_header;
575 p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
576 QED_RDMA_CNQ_RAM);
577 p_params_header->num_cnqs = params->desired_cnq;
578
579 if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
580 p_params_header->cq_ring_mode = 1;
581 else
582 p_params_header->cq_ring_mode = 0;
583
584 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
585 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
586 igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
587 p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
588 p_cnq_params = &p_ramrod->cnq_params[cnq_id];
589 p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
590
591 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
592 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
593
594 DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
595 p_cnq_pbl_list->pbl_ptr);
596
597 /* we assume here that cnq_id and qz_offset are the same */
598 p_cnq_params->queue_zone_num =
599 cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
600 cnq_id);
601 }
602
603 return qed_spq_post(p_hwfn, p_ent, NULL);
604}
605
606static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
607{
608 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
609 int rc;
610
611 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
612
613 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
614 rc = qed_rdma_bmap_alloc_id(p_hwfn,
615 &p_hwfn->p_rdma_info->tid_map, itid);
616 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
617 if (rc)
618 goto out;
619
620 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
621out:
622 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
623 return rc;
624}
625
626static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
627{
628 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
629
630 /* The first DPI is reserved for the Kernel */
631 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
632
633 /* Tid 0 will be used as the key for "reserved MR".
634 * The driver should allocate memory for it so it can be loaded but no
635 * ramrod should be passed on it.
636 */
637 qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
638 if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
639 DP_NOTICE(p_hwfn,
640 "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
641 return -EINVAL;
642 }
643
644 return 0;
645}
646
647static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
648 struct qed_ptt *p_ptt,
649 struct qed_rdma_start_in_params *params)
650{
651 int rc;
652
653 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
654
655 spin_lock_init(&p_hwfn->p_rdma_info->lock);
656
657 qed_rdma_init_devinfo(p_hwfn, params);
658 qed_rdma_init_port(p_hwfn);
659 qed_rdma_init_events(p_hwfn, params);
660
661 rc = qed_rdma_reserve_lkey(p_hwfn);
662 if (rc)
663 return rc;
664
665 rc = qed_rdma_init_hw(p_hwfn, p_ptt);
666 if (rc)
667 return rc;
668
669 qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
670 qed_roce_async_event);
671
672 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
673}
674
675void qed_roce_stop(struct qed_hwfn *p_hwfn)
676{
677 struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
678 int wait_count = 0;
679
 680 /* When destroying a RoCE QP, control is returned to the user after
681 * the synchronous part. The asynchronous part may take a little longer.
682 * We delay for a short while if an async destroy QP is still expected.
683 * Beyond the added delay we clear the bitmap anyway.
684 */
685 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
686 msleep(100);
687 if (wait_count++ > 20) {
688 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
689 break;
690 }
691 }
692 qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
693}
694
695static int qed_rdma_stop(void *rdma_cxt)
696{
697 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
698 struct rdma_close_func_ramrod_data *p_ramrod;
699 struct qed_sp_init_data init_data;
700 struct qed_spq_entry *p_ent;
701 struct qed_ptt *p_ptt;
702 u32 ll2_ethertype_en;
703 int rc = -EBUSY;
704
705 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
706
707 p_ptt = qed_ptt_acquire(p_hwfn);
708 if (!p_ptt) {
709 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
710 return rc;
711 }
712
713 /* Disable RoCE search */
714 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
715 p_hwfn->b_rdma_enabled_in_prs = false;
716
717 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
718
719 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
720
721 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
722 (ll2_ethertype_en & 0xFFFE));
723
724 qed_roce_stop(p_hwfn);
725 qed_ptt_release(p_hwfn, p_ptt);
726
727 /* Get SPQ entry */
728 memset(&init_data, 0, sizeof(init_data));
729 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
730 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
731
732 /* Stop RoCE */
733 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
734 p_hwfn->p_rdma_info->proto, &init_data);
735 if (rc)
736 goto out;
737
738 p_ramrod = &p_ent->ramrod.rdma_close_func;
739
740 p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
741 p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
742
743 rc = qed_spq_post(p_hwfn, p_ent, NULL);
744
745out:
746 qed_rdma_free(p_hwfn);
747
748 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
749 return rc;
750}
751
752static int qed_rdma_add_user(void *rdma_cxt,
753 struct qed_rdma_add_user_out_params *out_params)
754{
755 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
756 u32 dpi_start_offset;
757 u32 returned_id = 0;
758 int rc;
759
760 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
761
762 /* Allocate DPI */
763 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
764 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
765 &returned_id);
766 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
767
768 out_params->dpi = (u16)returned_id;
769
770 /* Calculate the corresponding DPI address */
771 dpi_start_offset = p_hwfn->dpi_start_offset;
772
773 out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
774 dpi_start_offset +
775 ((out_params->dpi) * p_hwfn->dpi_size));
776
777 out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
778 dpi_start_offset +
779 ((out_params->dpi) * p_hwfn->dpi_size);
780
781 out_params->dpi_size = p_hwfn->dpi_size;
782 out_params->wid_count = p_hwfn->wid_count;
783
784 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
785 return rc;
786}
787
788static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
789{
790 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
791 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
792
793 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
794
795 /* Link may have changed */
796 p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
797 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
798
799 p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
800
801 p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
802
803 return p_port;
804}
805
806static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
807{
808 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
809
810 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
811
812 /* Return struct with device parameters */
813 return p_hwfn->p_rdma_info->dev;
814}
815
816static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
817{
818 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
819
820 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
821
822 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
823 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
824 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
825}
826
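/* Publish a new CNQ producer value to the FW by writing it into the USTORM
 * common queue-zone RAM. The queue zone is addressed as queue_zone_base +
 * qz_offset, relying on the assumption (see qed_rdma_start_fw()) that a
 * CNQ's index and its queue-zone offset are the same.
 */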
827static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
828{
829 struct qed_hwfn *p_hwfn;
830 u16 qz_num;
831 u32 addr;
832
833 p_hwfn = (struct qed_hwfn *)rdma_cxt;
834
835 if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
836 DP_NOTICE(p_hwfn,
837 "queue zone offset %d is too large (max is %d)\n",
838 qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
839 return;
840 }
841
842 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
843 addr = GTT_BAR0_MAP_REG_USDM_RAM +
844 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
845
846 REG_WR16(p_hwfn, addr, prod);
847
848 /* keep prod updates ordered */
849 wmb();
850}
851
852static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
853 struct qed_dev_rdma_info *info)
854{
855 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
856
857 memset(info, 0, sizeof(*info));
858
859 info->rdma_type = QED_RDMA_TYPE_ROCE;
860 info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
861
862 qed_fill_dev_info(cdev, &info->common);
863
864 return 0;
865}
866
867static int qed_rdma_get_sb_start(struct qed_dev *cdev)
868{
869 int feat_num;
870
871 if (cdev->num_hwfns > 1)
872 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
873 else
874 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
875 cdev->num_hwfns;
876
877 return feat_num;
878}
879
880static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
881{
882 int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
883 int n_msix = cdev->int_params.rdma_msix_cnt;
884
885 return min_t(int, n_cnq, n_msix);
886}
887
888static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
889{
890 int limit = 0;
891
892 /* Mark the fastpath as free/used */
893 cdev->int_params.fp_initialized = cnt ? true : false;
894
895 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
896 DP_ERR(cdev,
897 "qed roce supports only MSI-X interrupts (detected %d).\n",
898 cdev->int_params.out.int_mode);
899 return -EINVAL;
900 } else if (cdev->int_params.fp_msix_cnt) {
901 limit = cdev->int_params.rdma_msix_cnt;
902 }
903
904 if (!limit)
905 return -ENOMEM;
906
907 return min_t(int, cnt, limit);
908}
909
910static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
911{
912 memset(info, 0, sizeof(*info));
913
914 if (!cdev->int_params.fp_initialized) {
915 DP_INFO(cdev,
916 "Protocol driver requested interrupt information, but its support is not yet configured\n");
917 return -EINVAL;
918 }
919
920 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
921 int msix_base = cdev->int_params.rdma_msix_base;
922
923 info->msix_cnt = cdev->int_params.rdma_msix_cnt;
924 info->msix = &cdev->int_params.msix_table[msix_base];
925
926 DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
927 info->msix_cnt, msix_base);
928 }
929
930 return 0;
931}
932
933static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
934{
935 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
936 u32 returned_id;
937 int rc;
938
939 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
940
941 /* Allocates an unused protection domain */
942 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
943 rc = qed_rdma_bmap_alloc_id(p_hwfn,
944 &p_hwfn->p_rdma_info->pd_map, &returned_id);
945 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
946
947 *pd = (u16)returned_id;
948
949 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
950 return rc;
951}
952
953static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
954{
955 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
956
957 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
958
959 /* Returns a previously allocated protection domain for reuse */
960 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
961 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
962 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
963}
964
965static enum qed_rdma_toggle_bit
966qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
967{
968 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
969 enum qed_rdma_toggle_bit toggle_bit;
970 u32 bmap_id;
971
972 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
973
 974 /* The function toggles the bit that is related to a given icid
 975 * and returns the new toggle bit's value.
976 */
977 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
978
979 spin_lock_bh(&p_info->lock);
980 toggle_bit = !test_and_change_bit(bmap_id,
981 p_info->toggle_bits.bitmap);
982 spin_unlock_bh(&p_info->lock);
983
984 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
985 toggle_bit);
986
987 return toggle_bit;
988}
989
990static int qed_rdma_create_cq(void *rdma_cxt,
991 struct qed_rdma_create_cq_in_params *params,
992 u16 *icid)
993{
994 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
995 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
996 struct rdma_create_cq_ramrod_data *p_ramrod;
997 enum qed_rdma_toggle_bit toggle_bit;
998 struct qed_sp_init_data init_data;
999 struct qed_spq_entry *p_ent;
1000 u32 returned_id, start_cid;
1001 int rc;
1002
1003 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
1004 params->cq_handle_hi, params->cq_handle_lo);
1005
1006 /* Allocate icid */
1007 spin_lock_bh(&p_info->lock);
1008 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
1009 spin_unlock_bh(&p_info->lock);
1010
1011 if (rc) {
1012 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
1013 return rc;
1014 }
1015
1016 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1017 p_info->proto);
1018 *icid = returned_id + start_cid;
1019
1020 /* Check if icid requires a page allocation */
1021 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
1022 if (rc)
1023 goto err;
1024
1025 /* Get SPQ entry */
1026 memset(&init_data, 0, sizeof(init_data));
1027 init_data.cid = *icid;
1028 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1029 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1030
1031 /* Send create CQ ramrod */
1032 rc = qed_sp_init_request(p_hwfn, &p_ent,
1033 RDMA_RAMROD_CREATE_CQ,
1034 p_info->proto, &init_data);
1035 if (rc)
1036 goto err;
1037
1038 p_ramrod = &p_ent->ramrod.rdma_create_cq;
1039
1040 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
1041 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
1042 p_ramrod->dpi = cpu_to_le16(params->dpi);
1043 p_ramrod->is_two_level_pbl = params->pbl_two_level;
1044 p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
1045 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
1046 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
1047 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
1048 params->cnq_id;
1049 p_ramrod->int_timeout = params->int_timeout;
1050
1051 /* toggle the bit for every resize or create cq for a given icid */
1052 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1053
1054 p_ramrod->toggle_bit = toggle_bit;
1055
1056 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1057 if (rc) {
1058 /* restore toggle bit */
1059 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1060 goto err;
1061 }
1062
1063 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
1064 return rc;
1065
1066err:
1067 /* release allocated icid */
1068 spin_lock_bh(&p_info->lock);
1069 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
1070 spin_unlock_bh(&p_info->lock);
1071 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
1072
1073 return rc;
1074}
1075
1076static int
1077qed_rdma_destroy_cq(void *rdma_cxt,
1078 struct qed_rdma_destroy_cq_in_params *in_params,
1079 struct qed_rdma_destroy_cq_out_params *out_params)
1080{
1081 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1082 struct rdma_destroy_cq_output_params *p_ramrod_res;
1083 struct rdma_destroy_cq_ramrod_data *p_ramrod;
1084 struct qed_sp_init_data init_data;
1085 struct qed_spq_entry *p_ent;
1086 dma_addr_t ramrod_res_phys;
1087 enum protocol_type proto;
1088 int rc = -ENOMEM;
1089
1090 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
1091
1092 p_ramrod_res =
1093 (struct rdma_destroy_cq_output_params *)
1094 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1095 sizeof(struct rdma_destroy_cq_output_params),
1096 &ramrod_res_phys, GFP_KERNEL);
1097 if (!p_ramrod_res) {
1098 DP_NOTICE(p_hwfn,
1099 "qed destroy cq failed: cannot allocate memory (ramrod)\n");
1100 return rc;
1101 }
1102
1103 /* Get SPQ entry */
1104 memset(&init_data, 0, sizeof(init_data));
1105 init_data.cid = in_params->icid;
1106 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1107 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1108 proto = p_hwfn->p_rdma_info->proto;
1109 /* Send destroy CQ ramrod */
1110 rc = qed_sp_init_request(p_hwfn, &p_ent,
1111 RDMA_RAMROD_DESTROY_CQ,
1112 proto, &init_data);
1113 if (rc)
1114 goto err;
1115
1116 p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1117 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1118
1119 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1120 if (rc)
1121 goto err;
1122
1123 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1124
1125 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1126 sizeof(struct rdma_destroy_cq_output_params),
1127 p_ramrod_res, ramrod_res_phys);
1128
1129 /* Free icid */
1130 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1131
1132 qed_bmap_release_id(p_hwfn,
1133 &p_hwfn->p_rdma_info->cq_map,
1134 (in_params->icid -
1135 qed_cxt_get_proto_cid_start(p_hwfn, proto)));
1136
1137 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1138
1139 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1140 return rc;
1141
1142err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1143 sizeof(struct rdma_destroy_cq_output_params),
1144 p_ramrod_res, ramrod_res_phys);
1145
1146 return rc;
1147}
1148
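/* Pack the 6-byte MAC into the three 16-bit words the FW expects: each pair
 * of octets is combined MSB-first and the result is stored little-endian.
 */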
1149static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
1150{
1151 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1152 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1153 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1154}
1155
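/* Copy the source/destination GIDs into a ramrod. For RoCEv2 over IPv4 the
 * 32-bit address goes into the most significant dword (index 3) of the
 * 128-bit GID and the lower dwords are zeroed; for RoCEv1 and RoCEv2 over
 * IPv6 the GID/IPv6 address is copied dword by dword.
 */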
1156static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1157 __le32 *dst_gid)
1158{
1159 u32 i;
1160
1161 if (qp->roce_mode == ROCE_V2_IPV4) {
1162 /* The IPv4 addresses shall be aligned to the highest word.
1163 * The lower words must be zero.
1164 */
1165 memset(src_gid, 0, sizeof(union qed_gid));
1166 memset(dst_gid, 0, sizeof(union qed_gid));
1167 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
1168 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
1169 } else {
1170 /* GIDs and IPv6 addresses coincide in location and size */
1171 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
1172 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
1173 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
1174 }
1175 }
1176}
1177
1178static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1179{
1180 enum roce_flavor flavor;
1181
1182 switch (roce_mode) {
1183 case ROCE_V1:
1184 flavor = PLAIN_ROCE;
1185 break;
1186 case ROCE_V2_IPV4:
1187 flavor = RROCE_IPV4;
1188 break;
1189 case ROCE_V2_IPV6:
1190 flavor = ROCE_V2_IPV6;
1191 break;
1192 default:
1193 flavor = MAX_ROCE_MODE;
1194 break;
1195 }
1196 return flavor;
1197}
1198
1199void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
1200{
1201 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1202 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
1203 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
1204 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1205}
1206
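/* Each RoCE QP consumes a pair of adjacent icids: the even one is used for
 * the responder and the following odd one for the requester (the create/
 * modify/destroy ramrods below post on qp->icid and qp->icid + 1
 * respectively). This is also why qed_rdma_init_hw() insists that the first
 * RoCE cid be even.
 */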
1207static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1208{
1209 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1210 u32 responder_icid;
1211 u32 requester_icid;
1212 int rc;
1213
1214 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1215 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1216 &responder_icid);
1217 if (rc) {
1218 spin_unlock_bh(&p_rdma_info->lock);
1219 return rc;
1220 }
1221
1222 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1223 &requester_icid);
1224
1225 spin_unlock_bh(&p_rdma_info->lock);
1226 if (rc)
1227 goto err;
1228
 1229 /* the two icids should be adjacent */
 1230 if ((requester_icid - responder_icid) != 1) {
 1231 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
1232 rc = -EINVAL;
1233 goto err;
1234 }
1235
1236 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1237 p_rdma_info->proto);
1238 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1239 p_rdma_info->proto);
1240
1241 /* If these icids require a new ILT line allocate DMA-able context for
1242 * an ILT page
1243 */
1244 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
1245 if (rc)
1246 goto err;
1247
1248 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
1249 if (rc)
1250 goto err;
1251
1252 *cid = (u16)responder_icid;
1253 return rc;
1254
1255err:
1256 spin_lock_bh(&p_rdma_info->lock);
1257 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
1258 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
1259
1260 spin_unlock_bh(&p_rdma_info->lock);
1261 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1262 "Allocate CID - failed, rc = %d\n", rc);
1263 return rc;
1264}
1265
1266static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
1267{
1268 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1269 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
1270 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1271}
1272
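/* Offload the responder side of a QP to the FW. The ramrod is posted on the
 * even icid of the pair (init_data.cid = qp->icid) and carries a one-page,
 * DMA-coherent IRQ ring whose address is handed to the FW together with the
 * QP attributes (GIDs, MTU, PSN, PD, queue ids, MACs, etc.).
 */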
1273static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1274 struct qed_rdma_qp *qp)
1275{
1276 struct roce_create_qp_resp_ramrod_data *p_ramrod;
1277 struct qed_sp_init_data init_data;
1278 enum roce_flavor roce_flavor;
1279 struct qed_spq_entry *p_ent;
1280 u16 regular_latency_queue;
1281 enum protocol_type proto;
1282 int rc;
1283
1284 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1285
1286 /* Allocate DMA-able memory for IRQ */
1287 qp->irq_num_pages = 1;
1288 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1289 RDMA_RING_PAGE_SIZE,
1290 &qp->irq_phys_addr, GFP_KERNEL);
1291 if (!qp->irq) {
1292 rc = -ENOMEM;
1293 DP_NOTICE(p_hwfn,
1294 "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
1295 rc);
1296 return rc;
1297 }
1298
1299 /* Get SPQ entry */
1300 memset(&init_data, 0, sizeof(init_data));
1301 init_data.cid = qp->icid;
1302 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1303 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1304
1305 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
1306 PROTOCOLID_ROCE, &init_data);
1307 if (rc)
1308 goto err;
1309
1310 p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
1311
1312 p_ramrod->flags = 0;
1313
1314 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1315 SET_FIELD(p_ramrod->flags,
1316 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1317
1318 SET_FIELD(p_ramrod->flags,
1319 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1320 qp->incoming_rdma_read_en);
1321
1322 SET_FIELD(p_ramrod->flags,
1323 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1324 qp->incoming_rdma_write_en);
1325
1326 SET_FIELD(p_ramrod->flags,
1327 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1328 qp->incoming_atomic_en);
1329
1330 SET_FIELD(p_ramrod->flags,
1331 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1332 qp->e2e_flow_control_en);
1333
1334 SET_FIELD(p_ramrod->flags,
1335 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
1336
1337 SET_FIELD(p_ramrod->flags,
1338 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
1339 qp->fmr_and_reserved_lkey);
1340
1341 SET_FIELD(p_ramrod->flags,
1342 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1343 qp->min_rnr_nak_timer);
1344
1345 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1346 p_ramrod->traffic_class = qp->traffic_class_tos;
1347 p_ramrod->hop_limit = qp->hop_limit_ttl;
1348 p_ramrod->irq_num_pages = qp->irq_num_pages;
1349 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1350 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1351 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1352 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1353 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
1354 p_ramrod->pd = cpu_to_le16(qp->pd);
1355 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
1356 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
1357 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
1358 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1359 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1360 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1361 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1362 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1363 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1364 qp->rq_cq_id);
1365
1366 regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
1367
1368 p_ramrod->regular_latency_phy_queue =
1369 cpu_to_le16(regular_latency_queue);
1370 p_ramrod->low_latency_phy_queue =
1371 cpu_to_le16(regular_latency_queue);
1372
1373 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1374
1375 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1376 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1377
1378 p_ramrod->udp_src_port = qp->udp_src_port;
1379 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1380 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
1381 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
1382
1383 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1384 qp->stats_queue;
1385
1386 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1387
1388 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1389 "rc = %d regular physical queue = 0x%x\n", rc,
1390 regular_latency_queue);
1391
1392 if (rc)
1393 goto err;
1394
1395 qp->resp_offloaded = true;
1396 qp->cq_prod = 0;
1397
1398 proto = p_hwfn->p_rdma_info->proto;
1399 qed_roce_set_real_cid(p_hwfn, qp->icid -
1400 qed_cxt_get_proto_cid_start(p_hwfn, proto));
1401
1402 return rc;
1403
1404err:
1405 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
1406 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1407 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1408 qp->irq, qp->irq_phys_addr);
1409
1410 return rc;
1411}
1412
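/* Requester-side counterpart of the function above: the ramrod is posted on
 * qp->icid + 1 and is backed by a one-page ORQ ring instead of an IRQ ring.
 */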
1413static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1414 struct qed_rdma_qp *qp)
1415{
1416 struct roce_create_qp_req_ramrod_data *p_ramrod;
1417 struct qed_sp_init_data init_data;
1418 enum roce_flavor roce_flavor;
1419 struct qed_spq_entry *p_ent;
1420 u16 regular_latency_queue;
1421 enum protocol_type proto;
1422 int rc;
1423
1424 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1425
1426 /* Allocate DMA-able memory for ORQ */
1427 qp->orq_num_pages = 1;
1428 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1429 RDMA_RING_PAGE_SIZE,
1430 &qp->orq_phys_addr, GFP_KERNEL);
1431 if (!qp->orq) {
1432 rc = -ENOMEM;
1433 DP_NOTICE(p_hwfn,
1434 "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
1435 rc);
1436 return rc;
1437 }
1438
1439 /* Get SPQ entry */
1440 memset(&init_data, 0, sizeof(init_data));
1441 init_data.cid = qp->icid + 1;
1442 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1443 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1444
1445 rc = qed_sp_init_request(p_hwfn, &p_ent,
1446 ROCE_RAMROD_CREATE_QP,
1447 PROTOCOLID_ROCE, &init_data);
1448 if (rc)
1449 goto err;
1450
1451 p_ramrod = &p_ent->ramrod.roce_create_qp_req;
1452
1453 p_ramrod->flags = 0;
1454
1455 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1456 SET_FIELD(p_ramrod->flags,
1457 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1458
1459 SET_FIELD(p_ramrod->flags,
1460 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
1461 qp->fmr_and_reserved_lkey);
1462
1463 SET_FIELD(p_ramrod->flags,
1464 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
1465
1466 SET_FIELD(p_ramrod->flags,
1467 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1468
1469 SET_FIELD(p_ramrod->flags,
1470 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1471 qp->rnr_retry_cnt);
1472
1473 p_ramrod->max_ord = qp->max_rd_atomic_req;
1474 p_ramrod->traffic_class = qp->traffic_class_tos;
1475 p_ramrod->hop_limit = qp->hop_limit_ttl;
1476 p_ramrod->orq_num_pages = qp->orq_num_pages;
1477 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1478 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1479 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1480 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1481 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1482 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
1483 p_ramrod->pd = cpu_to_le16(qp->pd);
1484 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
1485 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
1486 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
1487 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1488 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1489 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1490 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1491 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1492 p_ramrod->cq_cid =
1493 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
1494
1495 regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
1496
1497 p_ramrod->regular_latency_phy_queue =
1498 cpu_to_le16(regular_latency_queue);
1499 p_ramrod->low_latency_phy_queue =
1500 cpu_to_le16(regular_latency_queue);
1501
1502 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1503
1504 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1505 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1506
1507 p_ramrod->udp_src_port = qp->udp_src_port;
1508 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1509 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1510 qp->stats_queue;
1511
1512 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1513
1514 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1515
1516 if (rc)
1517 goto err;
1518
1519 qp->req_offloaded = true;
1520 proto = p_hwfn->p_rdma_info->proto;
1521 qed_roce_set_real_cid(p_hwfn,
1522 qp->icid + 1 -
1523 qed_cxt_get_proto_cid_start(p_hwfn, proto));
1524
1525 return rc;
1526
1527err:
 1528 DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
1529 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1530 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1531 qp->orq, qp->orq_phys_addr);
1532 return rc;
1533}
1534
1535static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
1536 struct qed_rdma_qp *qp,
1537 bool move_to_err, u32 modify_flags)
1538{
1539 struct roce_modify_qp_resp_ramrod_data *p_ramrod;
1540 struct qed_sp_init_data init_data;
1541 struct qed_spq_entry *p_ent;
1542 int rc;
1543
1544 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1545
1546 if (move_to_err && !qp->resp_offloaded)
1547 return 0;
1548
1549 /* Get SPQ entry */
1550 memset(&init_data, 0, sizeof(init_data));
1551 init_data.cid = qp->icid;
1552 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1553 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1554
1555 rc = qed_sp_init_request(p_hwfn, &p_ent,
1556 ROCE_EVENT_MODIFY_QP,
1557 PROTOCOLID_ROCE, &init_data);
1558 if (rc) {
1559 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1560 return rc;
1561 }
1562
1563 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
1564
1565 p_ramrod->flags = 0;
1566
1567 SET_FIELD(p_ramrod->flags,
1568 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1569
1570 SET_FIELD(p_ramrod->flags,
1571 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1572 qp->incoming_rdma_read_en);
1573
1574 SET_FIELD(p_ramrod->flags,
1575 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1576 qp->incoming_rdma_write_en);
1577
1578 SET_FIELD(p_ramrod->flags,
1579 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1580 qp->incoming_atomic_en);
1581
1582 SET_FIELD(p_ramrod->flags,
1583 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1584 qp->e2e_flow_control_en);
1585
1586 SET_FIELD(p_ramrod->flags,
1587 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
1588 GET_FIELD(modify_flags,
1589 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
1590
1591 SET_FIELD(p_ramrod->flags,
1592 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
1593 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1594
1595 SET_FIELD(p_ramrod->flags,
1596 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1597 GET_FIELD(modify_flags,
1598 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1599
1600 SET_FIELD(p_ramrod->flags,
1601 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
1602 GET_FIELD(modify_flags,
1603 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
1604
1605 SET_FIELD(p_ramrod->flags,
1606 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
1607 GET_FIELD(modify_flags,
1608 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
1609
1610 p_ramrod->fields = 0;
1611 SET_FIELD(p_ramrod->fields,
1612 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1613 qp->min_rnr_nak_timer);
1614
1615 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1616 p_ramrod->traffic_class = qp->traffic_class_tos;
1617 p_ramrod->hop_limit = qp->hop_limit_ttl;
1618 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1619 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1620 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1621 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1622 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1623
1624 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
1625 return rc;
1626}
1627
1628static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
1629 struct qed_rdma_qp *qp,
1630 bool move_to_sqd,
1631 bool move_to_err, u32 modify_flags)
1632{
1633 struct roce_modify_qp_req_ramrod_data *p_ramrod;
1634 struct qed_sp_init_data init_data;
1635 struct qed_spq_entry *p_ent;
1636 int rc;
1637
1638 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1639
1640 if (move_to_err && !(qp->req_offloaded))
1641 return 0;
1642
1643 /* Get SPQ entry */
1644 memset(&init_data, 0, sizeof(init_data));
1645 init_data.cid = qp->icid + 1;
1646 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1647 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1648
1649 rc = qed_sp_init_request(p_hwfn, &p_ent,
1650 ROCE_EVENT_MODIFY_QP,
1651 PROTOCOLID_ROCE, &init_data);
1652 if (rc) {
1653 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1654 return rc;
1655 }
1656
1657 p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
1658
1659 p_ramrod->flags = 0;
1660
1661 SET_FIELD(p_ramrod->flags,
1662 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1663
1664 SET_FIELD(p_ramrod->flags,
1665 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
1666
1667 SET_FIELD(p_ramrod->flags,
1668 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
1669 qp->sqd_async);
1670
1671 SET_FIELD(p_ramrod->flags,
1672 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
1673 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1674
1675 SET_FIELD(p_ramrod->flags,
1676 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1677 GET_FIELD(modify_flags,
1678 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1679
1680 SET_FIELD(p_ramrod->flags,
1681 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
1682 GET_FIELD(modify_flags,
1683 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
1684
1685 SET_FIELD(p_ramrod->flags,
1686 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
1687 GET_FIELD(modify_flags,
1688 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
1689
1690 SET_FIELD(p_ramrod->flags,
1691 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
1692 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
1693
1694 SET_FIELD(p_ramrod->flags,
1695 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
1696 GET_FIELD(modify_flags,
1697 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
1698
1699 p_ramrod->fields = 0;
1700 SET_FIELD(p_ramrod->fields,
1701 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1702
1703 SET_FIELD(p_ramrod->fields,
1704 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1705 qp->rnr_retry_cnt);
1706
1707 p_ramrod->max_ord = qp->max_rd_atomic_req;
1708 p_ramrod->traffic_class = qp->traffic_class_tos;
1709 p_ramrod->hop_limit = qp->hop_limit_ttl;
1710 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1711 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1712 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1713 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1714 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1715 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1716
1717 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
1718 return rc;
1719}
1720
1721static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1722 struct qed_rdma_qp *qp,
1723 u32 *num_invalidated_mw,
1724 u32 *cq_prod)
1725{
1726 struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1727 struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
1728 struct qed_sp_init_data init_data;
1729 struct qed_spq_entry *p_ent;
1730 dma_addr_t ramrod_res_phys;
1731 int rc;
1732
1733 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1734
1735 *num_invalidated_mw = 0;
1736 *cq_prod = qp->cq_prod;
1737
1738 if (!qp->resp_offloaded) {
 1739 /* If a responder was never offloaded, we need to free the cids
1740 * allocated in create_qp as a FW async event will never arrive
1741 */
1742 u32 cid;
1743
1744 cid = qp->icid -
1745 qed_cxt_get_proto_cid_start(p_hwfn,
1746 p_hwfn->p_rdma_info->proto);
1747 qed_roce_free_cid_pair(p_hwfn, (u16)cid);
1748
1749 return 0;
1750 }
1751
1752 /* Get SPQ entry */
1753 memset(&init_data, 0, sizeof(init_data));
1754 init_data.cid = qp->icid;
1755 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1756 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1757
1758 rc = qed_sp_init_request(p_hwfn, &p_ent,
1759 ROCE_RAMROD_DESTROY_QP,
1760 PROTOCOLID_ROCE, &init_data);
1761 if (rc)
1762 return rc;
1763
1764 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
1765
1766 p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
1767 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1768 &ramrod_res_phys, GFP_KERNEL);
1769
1770 if (!p_ramrod_res) {
1771 rc = -ENOMEM;
1772 DP_NOTICE(p_hwfn,
1773 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
1774 rc);
1775 return rc;
1776 }
1777
1778 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1779
1780 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1781 if (rc)
1782 goto err;
1783
1784 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
1785 *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
1786 qp->cq_prod = *cq_prod;
1787
	/* Free the IRQ ring only if the ramrod succeeded; otherwise the FW may still use it */
1789 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1790 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1791 qp->irq, qp->irq_phys_addr);
1792
1793 qp->resp_offloaded = false;
1794
1795 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
1796
1797err:
1798 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*p_ramrod_res),
1800 p_ramrod_res, ramrod_res_phys);
1801
1802 return rc;
1803}
1804
1805static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
1806 struct qed_rdma_qp *qp,
1807 u32 *num_bound_mw)
1808{
1809 struct roce_destroy_qp_req_output_params *p_ramrod_res;
1810 struct roce_destroy_qp_req_ramrod_data *p_ramrod;
1811 struct qed_sp_init_data init_data;
1812 struct qed_spq_entry *p_ent;
1813 dma_addr_t ramrod_res_phys;
1814 int rc = -ENOMEM;
1815
1816 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1817
1818 if (!qp->req_offloaded)
1819 return 0;
1820
	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
1825 if (!p_ramrod_res) {
1826 DP_NOTICE(p_hwfn,
1827 "qed destroy requester failed: cannot allocate memory (ramrod)\n");
1828 return rc;
1829 }
1830
1831 /* Get SPQ entry */
1832 memset(&init_data, 0, sizeof(init_data));
1833 init_data.cid = qp->icid + 1;
1834 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1835 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1836
1837 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
1838 PROTOCOLID_ROCE, &init_data);
1839 if (rc)
1840 goto err;
1841
1842 p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
1843 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1844
1845 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1846 if (rc)
1847 goto err;
1848
1849 *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
1850
	/* Free the ORQ ring only if the ramrod succeeded; otherwise the FW may still use it */
1852 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1853 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1854 qp->orq, qp->orq_phys_addr);
1855
1856 qp->req_offloaded = false;
1857
1858 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
1859
1860err:
1861 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1862 p_ramrod_res, ramrod_res_phys);
1863
1864 return rc;
1865}
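
/* Editor's note: the *_output_params structures used by the destroy flows
 * above must live in DMA-coherent memory because the FW writes its results
 * (num_bound_mw here, num_invalidated_mw/cq_prod for the responder) back
 * through the address programmed into output_params_addr; the values are
 * only valid once qed_spq_post() has completed successfully in EBLOCK mode.
 */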
1866
1867static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1868 struct qed_rdma_qp *qp,
1869 struct qed_rdma_query_qp_out_params *out_params)
1870{
1871 struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1872 struct roce_query_qp_req_output_params *p_req_ramrod_res;
1873 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
1874 struct roce_query_qp_req_ramrod_data *p_req_ramrod;
1875 struct qed_sp_init_data init_data;
1876 dma_addr_t resp_ramrod_res_phys;
1877 dma_addr_t req_ramrod_res_phys;
1878 struct qed_spq_entry *p_ent;
1879 bool rq_err_state;
1880 bool sq_err_state;
1881 bool sq_draining;
1882 int rc = -ENOMEM;
1883
1884 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send a ramrod to the FW since this QP wasn't
		 * offloaded to the FW yet.
		 */
1888 out_params->draining = false;
1889 out_params->rq_psn = qp->rq_psn;
1890 out_params->sq_psn = qp->sq_psn;
1891 out_params->state = qp->cur_state;
1892
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP not offloaded - no FW state to query\n");
1894 return 0;
1895 }
1896
1897 if (!(qp->resp_offloaded)) {
1898 DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before the requester's\n");
1900 return -EINVAL;
1901 }
1902
1903 /* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					       sizeof(*p_resp_ramrod_res),
					       &resp_ramrod_res_phys,
					       GFP_KERNEL);
1908 if (!p_resp_ramrod_res) {
1909 DP_NOTICE(p_hwfn,
1910 "qed query qp failed: cannot allocate memory (ramrod)\n");
1911 return rc;
1912 }
1913
1914 /* Get SPQ entry */
1915 memset(&init_data, 0, sizeof(init_data));
1916 init_data.cid = qp->icid;
1917 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1918 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1919 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1920 PROTOCOLID_ROCE, &init_data);
1921 if (rc)
1922 goto err_resp;
1923
1924 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
1925 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
1926
1927 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1928 if (rc)
1929 goto err_resp;
1930
1931 out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
1932 rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
1933 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
1934
1935 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1936 p_resp_ramrod_res, resp_ramrod_res_phys);
1937
1938 if (!(qp->req_offloaded)) {
1939 /* Don't send query qp for the requester */
1940 out_params->sq_psn = qp->sq_psn;
1941 out_params->draining = false;
1942
1943 if (rq_err_state)
1944 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1945
1946 out_params->state = qp->cur_state;
1947
1948 return 0;
1949 }
1950
1951 /* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
1957 if (!p_req_ramrod_res) {
1958 rc = -ENOMEM;
1959 DP_NOTICE(p_hwfn,
1960 "qed query qp failed: cannot allocate memory (ramrod)\n");
1961 return rc;
1962 }
1963
1964 /* Get SPQ entry */
1965 init_data.cid = qp->icid + 1;
1966 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1967 PROTOCOLID_ROCE, &init_data);
1968 if (rc)
1969 goto err_req;
1970
1971 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
1972 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
1973
1974 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1975 if (rc)
1976 goto err_req;
1977
1978 out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
1979 sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1980 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
1981 sq_draining =
1982 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1983 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
1984
1985 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1986 p_req_ramrod_res, req_ramrod_res_phys);
1987
1988 out_params->draining = false;
1989
1990 if (rq_err_state || sq_err_state)
1991 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1992 else if (sq_draining)
1993 out_params->draining = true;
1994 out_params->state = qp->cur_state;
1995
1996 return 0;
1997
1998err_req:
1999 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
2000 p_req_ramrod_res, req_ramrod_res_phys);
2001 return rc;
2002err_resp:
2003 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
2004 p_resp_ramrod_res, resp_ramrod_res_phys);
2005 return rc;
2006}
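
/* Illustrative sketch (hypothetical caller code, not part of this file):
 * querying a QP through the ops table below and reacting to the FW-reported
 * state. handle_qp_error() is a placeholder.
 *
 *	struct qed_rdma_query_qp_out_params qout = {};
 *
 *	rc = ops->rdma_query_qp(rdma_cxt, qp, &qout);
 *	if (!rc && qout.state == QED_ROCE_QP_STATE_ERR)
 *		handle_qp_error(qp);
 *
 * qout.draining indicates an SQ drain in progress, and rq_psn/sq_psn reflect
 * FW values only when the QP has been offloaded.
 */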
2007
2008static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
2009{
2010 u32 num_invalidated_mw = 0;
2011 u32 num_bound_mw = 0;
2012 u32 cq_prod;
2013 int rc;
2014
2015 /* Destroys the specified QP */
2016 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
2017 (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
2018 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
2019 DP_NOTICE(p_hwfn,
2020 "QP must be in error, reset or init state before destroying it\n");
2021 return -EINVAL;
2022 }
2023
2024 if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
2025 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
2026 &num_invalidated_mw,
2027 &cq_prod);
2028 if (rc)
2029 return rc;
2030
2031 /* Send destroy requester ramrod */
2032 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2033 &num_bound_mw);
2034 if (rc)
2035 return rc;
2036
2037 if (num_invalidated_mw != num_bound_mw) {
2038 DP_NOTICE(p_hwfn,
				  "number of invalidated memory windows differs from the number of bound ones\n");
2040 return -EINVAL;
2041 }
2042 }
2043
2044 return 0;
2045}
2046
2047static int qed_rdma_query_qp(void *rdma_cxt,
2048 struct qed_rdma_qp *qp,
2049 struct qed_rdma_query_qp_out_params *out_params)
2050{
2051 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2052 int rc;
2053
2054 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2055
2056 /* The following fields are filled in from qp and not FW as they can't
2057 * be modified by FW
2058 */
2059 out_params->mtu = qp->mtu;
2060 out_params->dest_qp = qp->dest_qp;
2061 out_params->incoming_atomic_en = qp->incoming_atomic_en;
2062 out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
2063 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
2064 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
2065 out_params->dgid = qp->dgid;
2066 out_params->flow_label = qp->flow_label;
2067 out_params->hop_limit_ttl = qp->hop_limit_ttl;
2068 out_params->traffic_class_tos = qp->traffic_class_tos;
2069 out_params->timeout = qp->ack_timeout;
2070 out_params->rnr_retry = qp->rnr_retry_cnt;
2071 out_params->retry_cnt = qp->retry_cnt;
2072 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
2073 out_params->pkey_index = 0;
2074 out_params->max_rd_atomic = qp->max_rd_atomic_req;
2075 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
2076 out_params->sqd_async = qp->sqd_async;
2077
2078 rc = qed_roce_query_qp(p_hwfn, qp, out_params);
2079
2080 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
2081 return rc;
2082}
2083
2084static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2085{
2086 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2087 int rc = 0;
2088
2089 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2090
2091 rc = qed_roce_destroy_qp(p_hwfn, qp);
2092
2093 /* free qp params struct */
2094 kfree(qp);
2095
2096 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
2097 return rc;
2098}
2099
2100static struct qed_rdma_qp *
2101qed_rdma_create_qp(void *rdma_cxt,
2102 struct qed_rdma_create_qp_in_params *in_params,
2103 struct qed_rdma_create_qp_out_params *out_params)
2104{
2105 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2106 struct qed_rdma_qp *qp;
2107 u8 max_stats_queues;
2108 int rc;
2109
2110 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
2111 DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
2113 rdma_cxt, in_params, out_params);
2114 return NULL;
2115 }
2116
2117 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2118 "qed rdma create qp called with qp_handle = %08x%08x\n",
2119 in_params->qp_handle_hi, in_params->qp_handle_lo);
2120
2121 /* Some sanity checks... */
2122 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
2123 if (in_params->stats_queue >= max_stats_queues) {
2124 DP_ERR(p_hwfn->cdev,
2125 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
2126 in_params->stats_queue, max_stats_queues);
2127 return NULL;
2128 }
2129
2130 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2131 if (!qp)
2132 return NULL;
2133
	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
	if (rc) {
		kfree(qp);
		return NULL;
	}

	qp->qpid = ((0xFF << 16) | qp->icid);

	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
2143
2144 qp->cur_state = QED_ROCE_QP_STATE_RESET;
2145 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
2146 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
2147 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
2148 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
2149 qp->use_srq = in_params->use_srq;
2150 qp->signal_all = in_params->signal_all;
2151 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
2152 qp->pd = in_params->pd;
2153 qp->dpi = in_params->dpi;
2154 qp->sq_cq_id = in_params->sq_cq_id;
2155 qp->sq_num_pages = in_params->sq_num_pages;
2156 qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
2157 qp->rq_cq_id = in_params->rq_cq_id;
2158 qp->rq_num_pages = in_params->rq_num_pages;
2159 qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
2160 qp->srq_id = in_params->srq_id;
2161 qp->req_offloaded = false;
2162 qp->resp_offloaded = false;
	qp->e2e_flow_control_en = !qp->use_srq;
2164 qp->stats_queue = in_params->stats_queue;
2165
2166 out_params->icid = qp->icid;
2167 out_params->qp_id = qp->qpid;
2168
2169 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
2170 return qp;
2171}
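
/* Usage sketch (hypothetical upper-layer code built on the qed_rdma_ops table
 * at the bottom of this file): creating a QP only allocates the icid pair and
 * caches the parameters - no ramrod is sent until the first modify towards
 * RTR. Field names are the ones consumed above; pd_id, dpi, etc. are
 * placeholders.
 *
 *	struct qed_rdma_create_qp_in_params in = {};
 *	struct qed_rdma_create_qp_out_params out = {};
 *	struct qed_rdma_qp *qp;
 *
 *	in.pd = pd_id;
 *	in.dpi = dpi;
 *	in.sq_cq_id = sq_cq;
 *	in.rq_cq_id = rq_cq;
 *	in.sq_num_pages = sq_pages;
 *	in.sq_pbl_ptr = sq_pbl_phys;
 *	in.rq_num_pages = rq_pages;
 *	in.rq_pbl_ptr = rq_pbl_phys;
 *	in.stats_queue = 0;
 *	qp = ops->rdma_create_qp(rdma_cxt, &in, &out);
 *	if (!qp)
 *		return -ENOMEM;
 */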
2172
2173static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2174 struct qed_rdma_qp *qp,
2175 enum qed_roce_qp_state prev_state,
2176 struct qed_rdma_modify_qp_in_params *params)
2177{
2178 u32 num_invalidated_mw = 0, num_bound_mw = 0;
2179 int rc = 0;
2180
2181 /* Perform additional operations according to the current state and the
2182 * next state
2183 */
2184 if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
2185 (prev_state == QED_ROCE_QP_STATE_RESET)) &&
2186 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
2187 /* Init->RTR or Reset->RTR */
2188 rc = qed_roce_sp_create_responder(p_hwfn, qp);
2189 return rc;
2190 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
2191 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2192 /* RTR-> RTS */
2193 rc = qed_roce_sp_create_requester(p_hwfn, qp);
2194 if (rc)
2195 return rc;
2196
2197 /* Send modify responder ramrod */
2198 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2199 params->modify_flags);
2200 return rc;
2201 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2202 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2203 /* RTS->RTS */
2204 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2205 params->modify_flags);
2206 if (rc)
2207 return rc;
2208
2209 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2210 params->modify_flags);
2211 return rc;
2212 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2213 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2214 /* RTS->SQD */
2215 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
2216 params->modify_flags);
2217 return rc;
2218 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2219 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2220 /* SQD->SQD */
2221 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2222 params->modify_flags);
2223 if (rc)
2224 return rc;
2225
2226 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2227 params->modify_flags);
2228 return rc;
2229 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2230 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2231 /* SQD->RTS */
2232 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2233 params->modify_flags);
2234 if (rc)
2235 return rc;
2236
2237 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2238 params->modify_flags);
2239
2240 return rc;
2241 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
2242 /* ->ERR */
2243 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
2244 params->modify_flags);
2245 if (rc)
2246 return rc;
2247
2248 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
2249 params->modify_flags);
2250 return rc;
2251 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
2252 /* Any state -> RESET */
2253 u32 cq_prod;
2254
2255 /* Send destroy responder ramrod */
2256 rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
2257 qp,
2258 &num_invalidated_mw,
2259 &cq_prod);
2260
2261 if (rc)
2262 return rc;
2263
2264 qp->cq_prod = cq_prod;
2265
2266 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2267 &num_bound_mw);
2268
2269 if (num_invalidated_mw != num_bound_mw) {
2270 DP_NOTICE(p_hwfn,
				  "number of invalidated memory windows differs from the number of bound ones\n");
2272 return -EINVAL;
2273 }
2274 } else {
2275 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
2276 }
2277
2278 return rc;
2279}
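
/* Summary of the transitions handled above (derived from the code, for
 * reference):
 *
 *	INIT/RESET -> RTR   : create responder
 *	RTR        -> RTS   : create requester, modify responder
 *	RTS        -> RTS   : modify responder, modify requester
 *	RTS        -> SQD   : modify requester (drain towards SQD)
 *	SQD        -> SQD   : modify responder, modify requester
 *	SQD        -> RTS   : modify responder, modify requester
 *	any        -> ERR   : modify responder and requester to error
 *	any        -> RESET : destroy responder, destroy requester
 *
 * Any other combination is a no-op here.
 */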
2280
2281static int qed_rdma_modify_qp(void *rdma_cxt,
2282 struct qed_rdma_qp *qp,
2283 struct qed_rdma_modify_qp_in_params *params)
2284{
2285 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2286 enum qed_roce_qp_state prev_state;
2287 int rc = 0;
2288
2289 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
2290 qp->icid, params->new_state);
2291
2297 if (GET_FIELD(params->modify_flags,
2298 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
2299 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
2300 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
2301 qp->incoming_atomic_en = params->incoming_atomic_en;
2302 }
2303
2304 /* Update QP structure with the updated values */
2305 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
2306 qp->roce_mode = params->roce_mode;
2307 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
2308 qp->pkey = params->pkey;
2309 if (GET_FIELD(params->modify_flags,
2310 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
2311 qp->e2e_flow_control_en = params->e2e_flow_control_en;
2312 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
2313 qp->dest_qp = params->dest_qp;
2314 if (GET_FIELD(params->modify_flags,
2315 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
2316 /* Indicates that the following parameters have changed:
2317 * Traffic class, flow label, hop limit, source GID,
2318 * destination GID, loopback indicator
2319 */
2320 qp->traffic_class_tos = params->traffic_class_tos;
2321 qp->flow_label = params->flow_label;
2322 qp->hop_limit_ttl = params->hop_limit_ttl;
2323
2324 qp->sgid = params->sgid;
2325 qp->dgid = params->dgid;
2326 qp->udp_src_port = 0;
2327 qp->vlan_id = params->vlan_id;
2328 qp->mtu = params->mtu;
2329 qp->lb_indication = params->lb_indication;
2330 memcpy((u8 *)&qp->remote_mac_addr[0],
2331 (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
2332 if (params->use_local_mac) {
2333 memcpy((u8 *)&qp->local_mac_addr[0],
2334 (u8 *)&params->local_mac_addr[0], ETH_ALEN);
2335 } else {
2336 memcpy((u8 *)&qp->local_mac_addr[0],
2337 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
2338 }
2339 }
2340 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
2341 qp->rq_psn = params->rq_psn;
2342 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
2343 qp->sq_psn = params->sq_psn;
2344 if (GET_FIELD(params->modify_flags,
2345 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
2346 qp->max_rd_atomic_req = params->max_rd_atomic_req;
2347 if (GET_FIELD(params->modify_flags,
2348 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
2349 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
2350 if (GET_FIELD(params->modify_flags,
2351 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
2352 qp->ack_timeout = params->ack_timeout;
2353 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
2354 qp->retry_cnt = params->retry_cnt;
2355 if (GET_FIELD(params->modify_flags,
2356 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
2357 qp->rnr_retry_cnt = params->rnr_retry_cnt;
2358 if (GET_FIELD(params->modify_flags,
2359 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
2360 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
2361
2362 qp->sqd_async = params->sqd_async;
2363
2364 prev_state = qp->cur_state;
2365 if (GET_FIELD(params->modify_flags,
2366 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
2367 qp->cur_state = params->new_state;
2368 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
2369 qp->cur_state);
2370 }
2371
2372 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
2373
2374 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
2375 return rc;
2376}
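
/* Illustrative call sequence (hypothetical caller, sketch only): driving a
 * freshly created QP to RTR and then RTS through the state machine above.
 * Only fields whose VALID bit is set in modify_flags are applied; the PSN
 * values and the extra attributes a real caller must supply (address vector,
 * MTU, timers, ...) are placeholders.
 *
 *	struct qed_rdma_modify_qp_in_params mod = {};
 *
 *	SET_FIELD(mod.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
 *	SET_FIELD(mod.modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
 *	mod.new_state = QED_ROCE_QP_STATE_RTR;
 *	mod.rq_psn = initial_rq_psn;
 *	rc = ops->rdma_modify_qp(rdma_cxt, qp, &mod);
 *
 *	memset(&mod, 0, sizeof(mod));
 *	SET_FIELD(mod.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
 *	SET_FIELD(mod.modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
 *	mod.new_state = QED_ROCE_QP_STATE_RTS;
 *	mod.sq_psn = initial_sq_psn;
 *	rc = ops->rdma_modify_qp(rdma_cxt, qp, &mod);
 */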
2377
2378static int
2379qed_rdma_register_tid(void *rdma_cxt,
2380 struct qed_rdma_register_tid_in_params *params)
2381{
2382 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2383 struct rdma_register_tid_ramrod_data *p_ramrod;
2384 struct qed_sp_init_data init_data;
2385 struct qed_spq_entry *p_ent;
2386 enum rdma_tid_type tid_type;
2387 u8 fw_return_code;
2388 int rc;
2389
2390 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
2391
2392 /* Get SPQ entry */
2393 memset(&init_data, 0, sizeof(init_data));
2394 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2395 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2396
2397 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
2398 p_hwfn->p_rdma_info->proto, &init_data);
2399 if (rc) {
2400 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2401 return rc;
2402 }
2403
2404 if (p_hwfn->p_rdma_info->last_tid < params->itid)
2405 p_hwfn->p_rdma_info->last_tid = params->itid;
2406
2407 p_ramrod = &p_ent->ramrod.rdma_register_tid;
2408
2409 p_ramrod->flags = 0;
2410 SET_FIELD(p_ramrod->flags,
2411 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
2412 params->pbl_two_level);
2413
2414 SET_FIELD(p_ramrod->flags,
2415 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
2416
2417 SET_FIELD(p_ramrod->flags,
2418 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
2419
	/* Don't initialize the page-size field when it is don't-care (D/C),
	 * as writing it may override other bits.
	 */
	if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr)
2422 SET_FIELD(p_ramrod->flags,
2423 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
2424 params->page_size_log - 12);
2425
2426 SET_FIELD(p_ramrod->flags,
2427 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
2428 params->remote_read);
2429
2430 SET_FIELD(p_ramrod->flags,
2431 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
2432 params->remote_write);
2433
2434 SET_FIELD(p_ramrod->flags,
2435 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
2436 params->remote_atomic);
2437
2438 SET_FIELD(p_ramrod->flags,
2439 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
2440 params->local_write);
2441
2442 SET_FIELD(p_ramrod->flags,
2443 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
2444
2445 SET_FIELD(p_ramrod->flags,
2446 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
2447 params->mw_bind);
2448
2449 SET_FIELD(p_ramrod->flags1,
2450 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
2451 params->pbl_page_size_log - 12);
2452
2453 SET_FIELD(p_ramrod->flags2,
2454 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
2455
2456 switch (params->tid_type) {
2457 case QED_RDMA_TID_REGISTERED_MR:
2458 tid_type = RDMA_TID_REGISTERED_MR;
2459 break;
2460 case QED_RDMA_TID_FMR:
2461 tid_type = RDMA_TID_FMR;
2462 break;
2463 case QED_RDMA_TID_MW_TYPE1:
2464 tid_type = RDMA_TID_MW_TYPE1;
2465 break;
2466 case QED_RDMA_TID_MW_TYPE2A:
2467 tid_type = RDMA_TID_MW_TYPE2A;
2468 break;
2469 default:
2470 rc = -EINVAL;
2471 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2472 return rc;
2473 }
2474 SET_FIELD(p_ramrod->flags1,
2475 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
2476
2477 p_ramrod->itid = cpu_to_le32(params->itid);
2478 p_ramrod->key = params->key;
2479 p_ramrod->pd = cpu_to_le16(params->pd);
2480 p_ramrod->length_hi = (u8)(params->length >> 32);
2481 p_ramrod->length_lo = DMA_LO_LE(params->length);
2482 if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * For a zero-based MR this holds the FBO (first byte offset).
		 */
2486 p_ramrod->va.hi = 0;
2487 p_ramrod->va.lo = cpu_to_le32(params->fbo);
2488 } else {
2489 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
2490 }
2491 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
2492
2493 /* DIF */
2494 if (params->dif_enabled) {
2495 SET_FIELD(p_ramrod->flags2,
2496 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
2497 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
2498 params->dif_error_addr);
2499 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
2500 }
2501
2502 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2503 if (rc)
2504 return rc;
2505
2506 if (fw_return_code != RDMA_RETURN_OK) {
2507 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2508 return -EINVAL;
2509 }
2510
2511 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
2512 return rc;
2513}
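
/* Illustrative sketch (hypothetical caller): registering a regular MR backed
 * by a two-level PBL with 4K pages. Only fields consumed above are shown;
 * itid, key, pd_id, pbl_phys, mr_va and mr_len are placeholders.
 *
 *	struct qed_rdma_register_tid_in_params tid = {};
 *
 *	tid.itid = itid;
 *	tid.tid_type = QED_RDMA_TID_REGISTERED_MR;
 *	tid.key = key;
 *	tid.pd = pd_id;
 *	tid.local_read = 1;
 *	tid.local_write = 1;
 *	tid.remote_read = 1;
 *	tid.remote_write = 1;
 *	tid.pbl_two_level = 1;
 *	tid.pbl_ptr = pbl_phys;
 *	tid.pbl_page_size_log = 12;
 *	tid.page_size_log = 12;
 *	tid.vaddr = mr_va;
 *	tid.length = mr_len;
 *	rc = ops->rdma_register_tid(rdma_cxt, &tid);
 */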
2514
2515static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2516{
2517 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2518 struct rdma_deregister_tid_ramrod_data *p_ramrod;
2519 struct qed_sp_init_data init_data;
2520 struct qed_spq_entry *p_ent;
2521 struct qed_ptt *p_ptt;
2522 u8 fw_return_code;
2523 int rc;
2524
2525 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
2526
2527 /* Get SPQ entry */
2528 memset(&init_data, 0, sizeof(init_data));
2529 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2530 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2531
2532 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
2533 p_hwfn->p_rdma_info->proto, &init_data);
2534 if (rc) {
2535 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2536 return rc;
2537 }
2538
2539 p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
2540 p_ramrod->itid = cpu_to_le32(itid);
2541
2542 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2543 if (rc) {
2544 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2545 return rc;
2546 }
2547
2548 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
2549 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2550 return -EINVAL;
2551 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* The TID is still in use; a NIG drain is required before
		 * resending the ramrod.
		 */
2555 p_ptt = qed_ptt_acquire(p_hwfn);
2556 if (!p_ptt) {
2557 rc = -EBUSY;
2558 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2559 "Failed to acquire PTT\n");
2560 return rc;
2561 }
2562
2563 rc = qed_mcp_drain(p_hwfn, p_ptt);
2564 if (rc) {
2565 qed_ptt_release(p_hwfn, p_ptt);
2566 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2567 "Drain failed\n");
2568 return rc;
2569 }
2570
2571 qed_ptt_release(p_hwfn, p_ptt);
2572
2573 /* Resend the ramrod */
2574 rc = qed_sp_init_request(p_hwfn, &p_ent,
2575 RDMA_RAMROD_DEREGISTER_MR,
2576 p_hwfn->p_rdma_info->proto,
2577 &init_data);
2578 if (rc) {
2579 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2580 "Failed to init sp-element\n");
2581 return rc;
2582 }
2583
2584 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2585 if (rc) {
2586 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2587 "Ramrod failed\n");
2588 return rc;
2589 }
2590
2591 if (fw_return_code != RDMA_RETURN_OK) {
2592 DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
2593 fw_return_code);
2594 return rc;
2595 }
2596 }
2597
2598 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
2599 return rc;
2600}
2601
2602static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
2603{
2604 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
2605 u32 start_cid, cid, xcid;
2606
	/* An even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as input can be either. We calculate
	 * the "partner" icid and call it xcid. Only when both are free can the
	 * "cid" map be cleared.
	 */
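	/* Worked example: if start_cid is 0x10 and the responder of a QP was
	 * given icid 0x10, then cid = 0 and xcid = 1 (its requester). Each
	 * release only clears that side's bit in real_cid_map; the cid_map
	 * pair is freed once the partner's real_cid bit is clear as well.
	 */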
2612 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
2613 cid = icid - start_cid;
2614 xcid = cid ^ 1;
2615
2616 spin_lock_bh(&p_rdma_info->lock);
2617
2618 qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
2619 if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
2620 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
2621 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
2622 }
2623
	spin_unlock_bh(&p_rdma_info->lock);
2625}
2626
2627static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
2628{
2629 return QED_LEADING_HWFN(cdev);
2630}
2631
2632static bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
2633{
2634 bool result;
2635
2636 /* if rdma info has not been allocated, naturally there are no qps */
2637 if (!p_hwfn->p_rdma_info)
2638 return false;
2639
2640 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2641 if (!p_hwfn->p_rdma_info->cid_map.bitmap)
2642 result = false;
2643 else
2644 result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
2645 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2646 return result;
2647}
2648
2649static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2650{
2651 u32 val;
2652
2653 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
2654
2655 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
2656 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
2657 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
2658 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
2659}
2660
2661void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2662{
2663 u8 val;
2664
	/* If any QPs are already active, disable DPM, since their context
	 * information predates the latest DCBx update. Otherwise enable it.
	 */
	val = qed_rdma_allocated_qps(p_hwfn) ? 1 : 0;
	p_hwfn->dcbx_no_edpm = val;
2671
2672 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2673}
2674
2675void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2676{
2677 p_hwfn->db_bar_no_edpm = true;
2678
2679 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2680}
2681
2682static int qed_rdma_start(void *rdma_cxt,
2683 struct qed_rdma_start_in_params *params)
2684{
2685 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2686 struct qed_ptt *p_ptt;
2687 int rc = -EBUSY;
2688
2689 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2690 "desired_cnq = %08x\n", params->desired_cnq);
2691
2692 p_ptt = qed_ptt_acquire(p_hwfn);
2693 if (!p_ptt)
2694 goto err;
2695
2696 rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
2697 if (rc)
2698 goto err1;
2699
2700 rc = qed_rdma_setup(p_hwfn, p_ptt, params);
2701 if (rc)
2702 goto err2;
2703
2704 qed_ptt_release(p_hwfn, p_ptt);
2705
2706 return rc;
2707
2708err2:
2709 qed_rdma_free(p_hwfn);
2710err1:
2711 qed_ptt_release(p_hwfn, p_ptt);
2712err:
2713 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
2714 return rc;
2715}
2716
2717static int qed_rdma_init(struct qed_dev *cdev,
2718 struct qed_rdma_start_in_params *params)
2719{
2720 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2721}
2722
2723static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
2724{
2725 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2726
2727 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
2728
2729 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2730 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
2731 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2732}
2733
2734static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
2735 u8 *old_mac_address,
2736 u8 *new_mac_address)
2737{
2738 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2739 struct qed_ptt *p_ptt;
2740 int rc = 0;
2741
2742 p_ptt = qed_ptt_acquire(p_hwfn);
2743 if (!p_ptt) {
2744 DP_ERR(cdev,
2745 "qed roce ll2 mac filter set: failed to acquire PTT\n");
2746 return -EINVAL;
2747 }
2748
2749 if (old_mac_address)
2750 qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
2751 if (new_mac_address)
2752 rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
2753
2754 qed_ptt_release(p_hwfn, p_ptt);
2755
2756 if (rc)
2757 DP_ERR(cdev,
2758 "qed roce ll2 mac filter set: failed to add MAC filter\n");
2759
2760 return rc;
2761}
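
/* Usage sketch (hypothetical consumer): an RoCE-over-LL2 user swaps the LLH
 * MAC filter when its source MAC changes.
 *
 *	rc = ops->ll2_set_mac_filter(cdev, old_mac, new_mac);
 *
 * Passing NULL for old_mac_address only adds the new filter; passing NULL for
 * new_mac_address only removes the old one.
 */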
2762
2763static const struct qed_rdma_ops qed_rdma_ops_pass = {
2764 .common = &qed_common_ops_pass,
2765 .fill_dev_info = &qed_fill_rdma_dev_info,
2766 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
2767 .rdma_init = &qed_rdma_init,
2768 .rdma_add_user = &qed_rdma_add_user,
2769 .rdma_remove_user = &qed_rdma_remove_user,
2770 .rdma_stop = &qed_rdma_stop,
2771 .rdma_query_port = &qed_rdma_query_port,
2772 .rdma_query_device = &qed_rdma_query_device,
2773 .rdma_get_start_sb = &qed_rdma_get_sb_start,
2774 .rdma_get_rdma_int = &qed_rdma_get_int,
2775 .rdma_set_rdma_int = &qed_rdma_set_int,
2776 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
2777 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
2778 .rdma_alloc_pd = &qed_rdma_alloc_pd,
2779 .rdma_dealloc_pd = &qed_rdma_free_pd,
2780 .rdma_create_cq = &qed_rdma_create_cq,
2781 .rdma_destroy_cq = &qed_rdma_destroy_cq,
2782 .rdma_create_qp = &qed_rdma_create_qp,
2783 .rdma_modify_qp = &qed_rdma_modify_qp,
2784 .rdma_query_qp = &qed_rdma_query_qp,
2785 .rdma_destroy_qp = &qed_rdma_destroy_qp,
2786 .rdma_alloc_tid = &qed_rdma_alloc_tid,
2787 .rdma_free_tid = &qed_rdma_free_tid,
2788 .rdma_register_tid = &qed_rdma_register_tid,
2789 .rdma_deregister_tid = &qed_rdma_deregister_tid,
2790 .ll2_acquire_connection = &qed_ll2_acquire_connection,
2791 .ll2_establish_connection = &qed_ll2_establish_connection,
2792 .ll2_terminate_connection = &qed_ll2_terminate_connection,
2793 .ll2_release_connection = &qed_ll2_release_connection,
2794 .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
2795 .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
2796 .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
2797 .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
2798 .ll2_get_stats = &qed_ll2_get_stats,
2799};
2800
2801const struct qed_rdma_ops *qed_get_rdma_ops(void)
2802{
2803 return &qed_rdma_ops_pass;
2804}
2805EXPORT_SYMBOL(qed_get_rdma_ops);
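
/* Consumer sketch (hypothetical, e.g. an RDMA ULP driver; not part of this
 * file): the ops table returned above is the entry point into this code.
 * cdev, start_params and the per-call parameter structures are supplied by
 * the consumer, and the rdma_stop() signature is assumed.
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *
 *	rc = ops->rdma_init(cdev, &start_params);
 *	...
 *	qp = ops->rdma_create_qp(rdma_cxt, &in, &out);
 *	rc = ops->rdma_modify_qp(rdma_cxt, qp, &mod);
 *	...
 *	rc = ops->rdma_destroy_qp(rdma_cxt, qp);
 *	rc = ops->rdma_stop(rdma_cxt);
 */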