/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include <asm/byteorder.h>
34#include <linux/bitops.h>
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37#include <linux/errno.h>
38#include <linux/etherdevice.h>
39#include <linux/if_ether.h>
40#include <linux/if_vlan.h>
41#include <linux/io.h>
42#include <linux/ip.h>
43#include <linux/ipv6.h>
44#include <linux/kernel.h>
45#include <linux/list.h>
46#include <linux/module.h>
47#include <linux/mutex.h>
48#include <linux/pci.h>
49#include <linux/slab.h>
50#include <linux/spinlock.h>
51#include <linux/string.h>
52#include <linux/tcp.h>
54#include <linux/qed/qed_roce_if.h>
56#include "qed.h"
57#include "qed_cxt.h"
58#include "qed_hsi.h"
59#include "qed_hw.h"
60#include "qed_init_ops.h"
61#include "qed_int.h"
62#include "qed_ll2.h"
63#include "qed_mcp.h"
64#include "qed_reg_addr.h"
65#include "qed_sp.h"
66#include "qed_roce.h"
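
/* Invoked for RoCE asynchronous EQ events; forwards the event opcode and
 * data to the affiliated_event callback the protocol driver registered via
 * qed_rdma_init_events().
 */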
69void qed_async_roce_event(struct qed_hwfn *p_hwfn,
70 struct event_ring_entry *p_eqe)
71{
72 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
73
74 p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
75 p_eqe->opcode, &p_eqe->data);
76}
77
78static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
79 struct qed_bmap *bmap, u32 max_count)
80{
81 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
82
83 bmap->max_count = max_count;
84
85 bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
86 GFP_KERNEL);
87 if (!bmap->bitmap) {
88 DP_NOTICE(p_hwfn,
89 "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
90 return -ENOMEM;
91 }
92
93 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
94 bmap->bitmap);
95 return 0;
96}
97
98static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
99 struct qed_bmap *bmap, u32 *id_num)
100{
101 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
102
103 *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
104
105 if (*id_num >= bmap->max_count) {
106 DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
107 bmap->max_count);
108 return -EINVAL;
109 }
110
111 __set_bit(*id_num, bmap->bitmap);
112
113 return 0;
114}
115
116static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
117 struct qed_bmap *bmap, u32 id_num)
118{
119 bool b_acquired;
120
121 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
122 if (id_num >= bmap->max_count)
123 return;
124
125 b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
126 if (!b_acquired) {
127 DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
128 return;
129 }
130}
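
/* Note: the bitmap helpers above do no locking of their own; callers are
 * expected to serialize qed_rdma_bmap_alloc_id() / qed_bmap_release_id()
 * with p_rdma_info->lock (see e.g. qed_rdma_alloc_tid() and
 * qed_rdma_free_tid() below).
 */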
131
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
134 /* First sb id for RoCE is after all the l2 sb */
135 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
136}
137
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
139 struct qed_ptt *p_ptt,
140 struct qed_rdma_start_in_params *params)
141{
142 struct qed_rdma_info *p_rdma_info;
143 u32 num_cons, num_tasks;
144 int rc = -ENOMEM;
145
146 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
147
148 /* Allocate a struct with current pf rdma info */
149 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
150 if (!p_rdma_info) {
151 DP_NOTICE(p_hwfn,
152 "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
153 rc);
154 return rc;
155 }
156
157 p_hwfn->p_rdma_info = p_rdma_info;
158 p_rdma_info->proto = PROTOCOLID_ROCE;
159
160 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
161
162 p_rdma_info->num_qps = num_cons / 2;
163
164 num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
165
166 /* Each MR uses a single task */
167 p_rdma_info->num_mrs = num_tasks;
168
169 /* Queue zone lines are shared between RoCE and L2 in such a way that
170 * they can be used by each without obstructing the other.
171 */
172 p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
173
174 /* Allocate a struct with device params and fill it */
175 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
176 if (!p_rdma_info->dev) {
177 DP_NOTICE(p_hwfn,
178 "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
179 rc);
180 goto free_rdma_info;
181 }
182
183 /* Allocate a struct with port params and fill it */
184 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
185 if (!p_rdma_info->port) {
186 DP_NOTICE(p_hwfn,
187 "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
188 rc);
189 goto free_rdma_dev;
190 }
191
192 /* Allocate bit map for pd's */
193 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
194 if (rc) {
195 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
196 "Failed to allocate pd_map, rc = %d\n",
197 rc);
198 goto free_rdma_port;
199 }
200
201 /* Allocate DPI bitmap */
202 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
203 p_hwfn->dpi_count);
204 if (rc) {
205 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
206 "Failed to allocate DPI bitmap, rc = %d\n", rc);
207 goto free_pd_map;
208 }
209
210 /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
211 * twice the number of QPs.
212 */
213 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
214 p_rdma_info->num_qps * 2);
215 if (rc) {
216 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
217 "Failed to allocate cq bitmap, rc = %d\n", rc);
218 goto free_dpi_map;
219 }
220
221 /* Allocate bitmap for toggle bit for cq icids
222 * We toggle the bit every time we create or resize cq for a given icid.
223 * The maximum number of CQs is bounded to twice the number of QPs.
224 */
225 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
226 p_rdma_info->num_qps * 2);
227 if (rc) {
228 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
229 "Failed to allocate toogle bits, rc = %d\n", rc);
230 goto free_cq_map;
231 }
232
233 /* Allocate bitmap for itids */
234 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
235 p_rdma_info->num_mrs);
236 if (rc) {
237 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
238 "Failed to allocate itids bitmaps, rc = %d\n", rc);
239 goto free_toggle_map;
240 }
241
242 /* Allocate bitmap for cids used for qps. */
243 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
244 if (rc) {
245 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
246 "Failed to allocate cid bitmap, rc = %d\n", rc);
247 goto free_tid_map;
248 }
249
250 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
251 return 0;
252
253free_tid_map:
254 kfree(p_rdma_info->tid_map.bitmap);
255free_toggle_map:
256 kfree(p_rdma_info->toggle_bits.bitmap);
257free_cq_map:
258 kfree(p_rdma_info->cq_map.bitmap);
259free_dpi_map:
260 kfree(p_rdma_info->dpi_map.bitmap);
261free_pd_map:
262 kfree(p_rdma_info->pd_map.bitmap);
263free_rdma_port:
264 kfree(p_rdma_info->port);
265free_rdma_dev:
266 kfree(p_rdma_info->dev);
267free_rdma_info:
268 kfree(p_rdma_info);
269
270 return rc;
271}
272
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
275 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
276
277 kfree(p_rdma_info->cid_map.bitmap);
278 kfree(p_rdma_info->tid_map.bitmap);
279 kfree(p_rdma_info->toggle_bits.bitmap);
280 kfree(p_rdma_info->cq_map.bitmap);
281 kfree(p_rdma_info->dpi_map.bitmap);
282 kfree(p_rdma_info->pd_map.bitmap);
283
284 kfree(p_rdma_info->port);
285 kfree(p_rdma_info->dev);
286
287 kfree(p_rdma_info);
288}
289
290static void qed_rdma_free(struct qed_hwfn *p_hwfn)
291{
292 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
293
294 qed_rdma_resc_free(p_hwfn);
295}
296
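
/* Derive a 64-bit node GUID from the port MAC address, EUI-64 style: flip
 * the universal/local bit of the first octet and insert 0xff, 0xfe between
 * the OUI and the NIC-specific bytes. For example, MAC 00:0e:1e:11:22:33
 * yields GUID 02:0e:1e:ff:fe:11:22:33.
 */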
297static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
298{
299 guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
300 guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
301 guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
302 guid[3] = 0xff;
303 guid[4] = 0xfe;
304 guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
305 guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
306 guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
307}
308
309static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
310 struct qed_rdma_start_in_params *params)
311{
312 struct qed_rdma_events *events;
313
314 events = &p_hwfn->p_rdma_info->events;
315
316 events->unaffiliated_event = params->events->unaffiliated_event;
317 events->affiliated_event = params->events->affiliated_event;
318 events->context = params->events->context;
319}
320
321static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
322 struct qed_rdma_start_in_params *params)
323{
324 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
325 struct qed_dev *cdev = p_hwfn->cdev;
326 u32 pci_status_control;
327 u32 num_qps;
328
329 /* Vendor specific information */
330 dev->vendor_id = cdev->vendor_id;
331 dev->vendor_part_id = cdev->device_id;
332 dev->hw_ver = 0;
333 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
334 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
335
336 qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
337 dev->node_guid = dev->sys_image_guid;
338
339 dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
340 RDMA_MAX_SGE_PER_RQ_WQE);
341
342 if (cdev->rdma_max_sge)
343 dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
344
345 dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
346
347 dev->max_inline = (cdev->rdma_max_inline) ?
348 min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
349 dev->max_inline;
350
351 dev->max_wqe = QED_RDMA_MAX_WQE;
352 dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
353
354 /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
355 * it is up-aligned to 16 and then to ILT page size within qed cxt.
356 * This is OK in terms of ILT but we don't want to configure the FW
357 * above its abilities
358 */
359 num_qps = ROCE_MAX_QPS;
360 num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
361 dev->max_qp = num_qps;
362
	/* CQs use the same icids that QPs use, hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
366 dev->max_cq = num_qps * 2;
367
368 /* The number of mrs is smaller by 1 since the first is reserved */
369 dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
370 dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
371
372 /* The maximum CQE capacity per CQ supported.
373 * max number of cqes will be in two layer pbl,
374 * 8 is the pointer size in bytes
375 * 32 is the size of cq element in bytes
376 */
377 if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
378 dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
379 else
380 dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
381
382 dev->max_mw = 0;
383 dev->max_fmr = QED_RDMA_MAX_FMR;
384 dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
385 dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
386 dev->max_pkey = QED_RDMA_MAX_P_KEY;
387
388 dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
389 (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
390 dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
391 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
392 dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
393 p_hwfn->p_rdma_info->num_qps;
394 dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
395 dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
396 dev->max_pd = RDMA_MAX_PDS;
397 dev->max_ah = p_hwfn->p_rdma_info->num_qps;
398 dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
399
	/* Set capabilities */
401 dev->dev_caps = 0;
402 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
403 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
404 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
405 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
406 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
407 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
408 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
409 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
410
411 /* Check atomic operations support in PCI configuration space. */
412 pci_read_config_dword(cdev->pdev,
413 cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
414 &pci_status_control);
415
416 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
417 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
418}
419
420static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
421{
422 struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
423 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
424
425 port->port_state = p_hwfn->mcp_info->link_output.link_up ?
426 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
427
428 port->max_msg_size = min_t(u64,
429 (dev->max_mr_mw_fmr_size *
430 p_hwfn->cdev->rdma_max_sge),
431 BIT(31));
432
433 port->pkey_bad_counter = 0;
434}
435
436static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
437{
438 u32 ll2_ethertype_en;
439
440 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
441 p_hwfn->b_rdma_enabled_in_prs = false;
442
443 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
444
445 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
446
447 /* We delay writing to this reg until first cid is allocated. See
448 * qed_cxt_dynamic_ilt_alloc function for more details
449 */
450 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
451 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
452 (ll2_ethertype_en | 0x01));
453
454 if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
455 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
456 return -EINVAL;
457 }
458
459 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
460 return 0;
461}
462
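/* Post the RDMA_RAMROD_FUNC_INIT ramrod. For every requested CNQ this fills
 * in the IGU status block id (RoCE SBs start right after the L2 ones, see
 * qed_rdma_get_sb_id()), the PBL describing the CNQ ring, and the queue zone
 * number later used for producer updates in qed_rdma_cnq_prod_update().
 */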
463static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
464 struct qed_rdma_start_in_params *params,
465 struct qed_ptt *p_ptt)
466{
467 struct rdma_init_func_ramrod_data *p_ramrod;
468 struct qed_rdma_cnq_params *p_cnq_pbl_list;
469 struct rdma_init_func_hdr *p_params_header;
470 struct rdma_cnq_params *p_cnq_params;
471 struct qed_sp_init_data init_data;
472 struct qed_spq_entry *p_ent;
473 u32 cnq_id, sb_id;
474 int rc;
475
476 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
477
478 /* Save the number of cnqs for the function close ramrod */
479 p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
480
481 /* Get SPQ entry */
482 memset(&init_data, 0, sizeof(init_data));
483 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
484 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
485
486 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
487 p_hwfn->p_rdma_info->proto, &init_data);
488 if (rc)
489 return rc;
490
491 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
492
493 p_params_header = &p_ramrod->params_header;
494 p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
495 QED_RDMA_CNQ_RAM);
496 p_params_header->num_cnqs = params->desired_cnq;
497
498 if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
499 p_params_header->cq_ring_mode = 1;
500 else
501 p_params_header->cq_ring_mode = 0;
502
503 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
504 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
505 p_cnq_params = &p_ramrod->cnq_params[cnq_id];
506 p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
507 p_cnq_params->sb_num =
508 cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
509
510 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
511 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
512
513 DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
514 p_cnq_pbl_list->pbl_ptr);
515
516 /* we assume here that cnq_id and qz_offset are the same */
517 p_cnq_params->queue_zone_num =
518 cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
519 cnq_id);
520 }
521
522 return qed_spq_post(p_hwfn, p_ent, NULL);
523}
524
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
526{
527 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
528 int rc;
529
530 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
531
532 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
533 rc = qed_rdma_bmap_alloc_id(p_hwfn,
534 &p_hwfn->p_rdma_info->tid_map, itid);
535 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
536 if (rc)
537 goto out;
538
539 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
540out:
541 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
542 return rc;
543}
544
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
546{
547 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
548
549 /* The first DPI is reserved for the Kernel */
550 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
551
552 /* Tid 0 will be used as the key for "reserved MR".
553 * The driver should allocate memory for it so it can be loaded but no
554 * ramrod should be passed on it.
555 */
556 qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
557 if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
558 DP_NOTICE(p_hwfn,
559 "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
560 return -EINVAL;
561 }
562
563 return 0;
564}
565
566static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
567 struct qed_ptt *p_ptt,
568 struct qed_rdma_start_in_params *params)
569{
570 int rc;
571
572 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
573
574 spin_lock_init(&p_hwfn->p_rdma_info->lock);
575
576 qed_rdma_init_devinfo(p_hwfn, params);
577 qed_rdma_init_port(p_hwfn);
578 qed_rdma_init_events(p_hwfn, params);
579
580 rc = qed_rdma_reserve_lkey(p_hwfn);
581 if (rc)
582 return rc;
583
584 rc = qed_rdma_init_hw(p_hwfn, p_ptt);
585 if (rc)
586 return rc;
587
588 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
589}
590
static int qed_rdma_stop(void *rdma_cxt)
{
593 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
594 struct rdma_close_func_ramrod_data *p_ramrod;
595 struct qed_sp_init_data init_data;
596 struct qed_spq_entry *p_ent;
597 struct qed_ptt *p_ptt;
598 u32 ll2_ethertype_en;
599 int rc = -EBUSY;
600
601 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
602
603 p_ptt = qed_ptt_acquire(p_hwfn);
604 if (!p_ptt) {
605 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
606 return rc;
607 }
608
609 /* Disable RoCE search */
610 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
611 p_hwfn->b_rdma_enabled_in_prs = false;
612
613 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
614
615 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
616
617 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
618 (ll2_ethertype_en & 0xFFFE));
619
620 qed_ptt_release(p_hwfn, p_ptt);
621
622 /* Get SPQ entry */
623 memset(&init_data, 0, sizeof(init_data));
624 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
625 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
626
627 /* Stop RoCE */
628 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
629 p_hwfn->p_rdma_info->proto, &init_data);
630 if (rc)
631 goto out;
632
633 p_ramrod = &p_ent->ramrod.rdma_close_func;
634
635 p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
636 p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
637
638 rc = qed_spq_post(p_hwfn, p_ent, NULL);
639
640out:
641 qed_rdma_free(p_hwfn);
642
643 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
644 return rc;
645}
646
static int qed_rdma_add_user(void *rdma_cxt,
648 struct qed_rdma_add_user_out_params *out_params)
{
650 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
651 u32 dpi_start_offset;
652 u32 returned_id = 0;
653 int rc;
654
655 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
656
657 /* Allocate DPI */
658 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
659 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
660 &returned_id);
661 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
662
663 out_params->dpi = (u16)returned_id;
664
665 /* Calculate the corresponding DPI address */
666 dpi_start_offset = p_hwfn->dpi_start_offset;
667
668 out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
669 dpi_start_offset +
670 ((out_params->dpi) * p_hwfn->dpi_size));
671
672 out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
673 dpi_start_offset +
674 ((out_params->dpi) * p_hwfn->dpi_size);
675
676 out_params->dpi_size = p_hwfn->dpi_size;
677
678 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
679 return rc;
680}
681
static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
684 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
685 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
686
687 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
688
689 /* Link may have changed */
690 p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
691 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
692
693 p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
694
695 return p_port;
696}
697
static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
700 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
701
702 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
703
704 /* Return struct with device parameters */
705 return p_hwfn->p_rdma_info->dev;
706}
707
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
710 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
711
712 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
713
714 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
715 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
716 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
717}
718
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
721 struct qed_hwfn *p_hwfn;
722 u16 qz_num;
723 u32 addr;
724
725 p_hwfn = (struct qed_hwfn *)rdma_cxt;
726 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
727 addr = GTT_BAR0_MAP_REG_USDM_RAM +
728 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
729
730 REG_WR16(p_hwfn, addr, prod);
731
732 /* keep prod updates ordered */
733 wmb();
734}
735
736static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
737 struct qed_dev_rdma_info *info)
738{
739 memset(info, 0, sizeof(*info));
740
741 info->rdma_type = QED_RDMA_TYPE_ROCE;
742
743 qed_fill_dev_info(cdev, &info->common);
744
745 return 0;
746}
747
748static int qed_rdma_get_sb_start(struct qed_dev *cdev)
749{
750 int feat_num;
751
752 if (cdev->num_hwfns > 1)
753 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
754 else
755 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
756 cdev->num_hwfns;
757
758 return feat_num;
759}
760
761static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
762{
763 int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
764 int n_msix = cdev->int_params.rdma_msix_cnt;
765
766 return min_t(int, n_cnq, n_msix);
767}
768
769static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
770{
771 int limit = 0;
772
773 /* Mark the fastpath as free/used */
774 cdev->int_params.fp_initialized = cnt ? true : false;
775
776 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
777 DP_ERR(cdev,
778 "qed roce supports only MSI-X interrupts (detected %d).\n",
779 cdev->int_params.out.int_mode);
780 return -EINVAL;
781 } else if (cdev->int_params.fp_msix_cnt) {
782 limit = cdev->int_params.rdma_msix_cnt;
783 }
784
785 if (!limit)
786 return -ENOMEM;
787
788 return min_t(int, cnt, limit);
789}
790
791static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
792{
793 memset(info, 0, sizeof(*info));
794
795 if (!cdev->int_params.fp_initialized) {
796 DP_INFO(cdev,
797 "Protocol driver requested interrupt information, but its support is not yet configured\n");
798 return -EINVAL;
799 }
800
801 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
802 int msix_base = cdev->int_params.rdma_msix_base;
803
804 info->msix_cnt = cdev->int_params.rdma_msix_cnt;
805 info->msix = &cdev->int_params.msix_table[msix_base];
806
807 DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
808 info->msix_cnt, msix_base);
809 }
810
811 return 0;
812}
813
static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
816 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
817 u32 returned_id;
818 int rc;
819
820 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
821
822 /* Allocates an unused protection domain */
823 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
824 rc = qed_rdma_bmap_alloc_id(p_hwfn,
825 &p_hwfn->p_rdma_info->pd_map, &returned_id);
826 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
827
828 *pd = (u16)returned_id;
829
830 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
831 return rc;
832}
833
834void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
835{
836 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
837
838 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
839
840 /* Returns a previously allocated protection domain for reuse */
841 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
842 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
843 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
844}
845
846static enum qed_rdma_toggle_bit
847qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
848{
849 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
850 enum qed_rdma_toggle_bit toggle_bit;
851 u32 bmap_id;
852
853 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
854
855 /* the function toggle the bit that is related to a given icid
856 * and returns the new toggle bit's value
857 */
858 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
859
860 spin_lock_bh(&p_info->lock);
861 toggle_bit = !test_and_change_bit(bmap_id,
862 p_info->toggle_bits.bitmap);
863 spin_unlock_bh(&p_info->lock);
864
865 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
866 toggle_bit);
867
868 return toggle_bit;
869}
870
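/* Create a CQ: take an icid from cq_map, offset it by the protocol's first
 * CID, make sure an ILT page backs it, and post a CREATE_CQ ramrod carrying
 * the CQ PBL and the CNQ binding. The toggle bit must change on every
 * create/resize of the same icid (see qed_rdma_toggle_bit_create_resize_cq()).
 */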
871int qed_rdma_create_cq(void *rdma_cxt,
872 struct qed_rdma_create_cq_in_params *params, u16 *icid)
873{
874 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
875 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
876 struct rdma_create_cq_ramrod_data *p_ramrod;
877 enum qed_rdma_toggle_bit toggle_bit;
878 struct qed_sp_init_data init_data;
879 struct qed_spq_entry *p_ent;
880 u32 returned_id, start_cid;
881 int rc;
882
883 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
884 params->cq_handle_hi, params->cq_handle_lo);
885
886 /* Allocate icid */
887 spin_lock_bh(&p_info->lock);
888 rc = qed_rdma_bmap_alloc_id(p_hwfn,
889 &p_info->cq_map, &returned_id);
890 spin_unlock_bh(&p_info->lock);
891
892 if (rc) {
893 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
894 return rc;
895 }
896
897 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
898 p_info->proto);
899 *icid = returned_id + start_cid;
900
901 /* Check if icid requires a page allocation */
902 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
903 if (rc)
904 goto err;
905
906 /* Get SPQ entry */
907 memset(&init_data, 0, sizeof(init_data));
908 init_data.cid = *icid;
909 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
910 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
911
912 /* Send create CQ ramrod */
913 rc = qed_sp_init_request(p_hwfn, &p_ent,
914 RDMA_RAMROD_CREATE_CQ,
915 p_info->proto, &init_data);
916 if (rc)
917 goto err;
918
919 p_ramrod = &p_ent->ramrod.rdma_create_cq;
920
921 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
922 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
923 p_ramrod->dpi = cpu_to_le16(params->dpi);
924 p_ramrod->is_two_level_pbl = params->pbl_two_level;
925 p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
926 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
927 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
928 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
929 params->cnq_id;
930 p_ramrod->int_timeout = params->int_timeout;
931
932 /* toggle the bit for every resize or create cq for a given icid */
933 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
934
935 p_ramrod->toggle_bit = toggle_bit;
936
937 rc = qed_spq_post(p_hwfn, p_ent, NULL);
938 if (rc) {
939 /* restore toggle bit */
940 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
941 goto err;
942 }
943
944 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
945 return rc;
946
947err:
948 /* release allocated icid */
949 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
950 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
951
952 return rc;
953}
954
955int qed_rdma_resize_cq(void *rdma_cxt,
956 struct qed_rdma_resize_cq_in_params *in_params,
957 struct qed_rdma_resize_cq_out_params *out_params)
958{
959 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
960 struct rdma_resize_cq_output_params *p_ramrod_res;
961 struct rdma_resize_cq_ramrod_data *p_ramrod;
962 enum qed_rdma_toggle_bit toggle_bit;
963 struct qed_sp_init_data init_data;
964 struct qed_spq_entry *p_ent;
965 dma_addr_t ramrod_res_phys;
966 u8 fw_return_code;
967 int rc = -ENOMEM;
968
969 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
970
971 p_ramrod_res =
972 (struct rdma_resize_cq_output_params *)
973 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
974 sizeof(struct rdma_resize_cq_output_params),
975 &ramrod_res_phys, GFP_KERNEL);
976 if (!p_ramrod_res) {
977 DP_NOTICE(p_hwfn,
978 "qed resize cq failed: cannot allocate memory (ramrod)\n");
979 return rc;
980 }
981
982 /* Get SPQ entry */
983 memset(&init_data, 0, sizeof(init_data));
984 init_data.cid = in_params->icid;
985 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
986 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
987
988 rc = qed_sp_init_request(p_hwfn, &p_ent,
989 RDMA_RAMROD_RESIZE_CQ,
990 p_hwfn->p_rdma_info->proto, &init_data);
991 if (rc)
992 goto err;
993
994 p_ramrod = &p_ent->ramrod.rdma_resize_cq;
995
996 p_ramrod->flags = 0;
997
998 /* toggle the bit for every resize or create cq for a given icid */
999 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
1000 in_params->icid);
1001
1002 SET_FIELD(p_ramrod->flags,
1003 RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
1004
1005 SET_FIELD(p_ramrod->flags,
1006 RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
1007 in_params->pbl_two_level);
1008
1009 p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
1010 p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
1011 p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
1012 DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
1013 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1014
1015 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1016 if (rc)
1017 goto err;
1018
1019 if (fw_return_code != RDMA_RETURN_OK) {
1020 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1021 rc = -EINVAL;
1022 goto err;
1023 }
1024
1025 out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
1026 out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
1027
1028 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1029 sizeof(struct rdma_resize_cq_output_params),
1030 p_ramrod_res, ramrod_res_phys);
1031
1032 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
1033
1034 return rc;
1035
1036err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1037 sizeof(struct rdma_resize_cq_output_params),
1038 p_ramrod_res, ramrod_res_phys);
1039 DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
1040
1041 return rc;
1042}
1043
1044int qed_rdma_destroy_cq(void *rdma_cxt,
1045 struct qed_rdma_destroy_cq_in_params *in_params,
1046 struct qed_rdma_destroy_cq_out_params *out_params)
1047{
1048 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1049 struct rdma_destroy_cq_output_params *p_ramrod_res;
1050 struct rdma_destroy_cq_ramrod_data *p_ramrod;
1051 struct qed_sp_init_data init_data;
1052 struct qed_spq_entry *p_ent;
1053 dma_addr_t ramrod_res_phys;
1054 int rc = -ENOMEM;
1055
1056 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
1057
1058 p_ramrod_res =
1059 (struct rdma_destroy_cq_output_params *)
1060 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1061 sizeof(struct rdma_destroy_cq_output_params),
1062 &ramrod_res_phys, GFP_KERNEL);
1063 if (!p_ramrod_res) {
1064 DP_NOTICE(p_hwfn,
1065 "qed destroy cq failed: cannot allocate memory (ramrod)\n");
1066 return rc;
1067 }
1068
1069 /* Get SPQ entry */
1070 memset(&init_data, 0, sizeof(init_data));
1071 init_data.cid = in_params->icid;
1072 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1073 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1074
1075 /* Send destroy CQ ramrod */
1076 rc = qed_sp_init_request(p_hwfn, &p_ent,
1077 RDMA_RAMROD_DESTROY_CQ,
1078 p_hwfn->p_rdma_info->proto, &init_data);
1079 if (rc)
1080 goto err;
1081
1082 p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1083 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1084
1085 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1086 if (rc)
1087 goto err;
1088
1089 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1090
1091 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1092 sizeof(struct rdma_destroy_cq_output_params),
1093 p_ramrod_res, ramrod_res_phys);
1094
1095 /* Free icid */
1096 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1097
1098 qed_bmap_release_id(p_hwfn,
1099 &p_hwfn->p_rdma_info->cq_map,
1100 (in_params->icid -
1101 qed_cxt_get_proto_cid_start(p_hwfn,
1102 p_hwfn->
1103 p_rdma_info->proto)));
1104
1105 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1106
1107 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1108 return rc;
1109
1110err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1111 sizeof(struct rdma_destroy_cq_output_params),
1112 p_ramrod_res, ramrod_res_phys);
1113
1114 return rc;
1115}
1116
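/* Pack the 6-byte MAC into the three 16-bit words the FW expects: each word
 * carries two consecutive MAC bytes (earlier byte in the upper 8 bits) and is
 * stored little-endian.
 */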
static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
1118{
1119 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1120 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1121 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1122}
1123
1124static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1125 __le32 *dst_gid)
1126{
1127 u32 i;
1128
1129 if (qp->roce_mode == ROCE_V2_IPV4) {
1130 /* The IPv4 addresses shall be aligned to the highest word.
1131 * The lower words must be zero.
1132 */
1133 memset(src_gid, 0, sizeof(union qed_gid));
1134 memset(dst_gid, 0, sizeof(union qed_gid));
1135 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
1136 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
1137 } else {
1138 /* GIDs and IPv6 addresses coincide in location and size */
1139 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
1140 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
1141 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
1142 }
1143 }
1144}
1145
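/* Map the driver's roce_mode onto the FW's roce_flavor enum used in the QP
 * ramrods: RoCE v1 is plain RoCE, RoCE v2 is routable RoCE over IPv4 or IPv6.
 */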
1146static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1147{
1148 enum roce_flavor flavor;
1149
1150 switch (roce_mode) {
1151 case ROCE_V1:
1152 flavor = PLAIN_ROCE;
1153 break;
1154 case ROCE_V2_IPV4:
1155 flavor = RROCE_IPV4;
1156 break;
1157 case ROCE_V2_IPV6:
		flavor = RROCE_IPV6;
1159 break;
1160 default:
		flavor = MAX_ROCE_FLAVOR;
1162 break;
1163 }
1164 return flavor;
1165}
1166
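/* Reserve two adjacent CIDs for a QP: the lower one (qp->icid) backs the
 * responder and the next one (qp->icid + 1) the requester, which is why
 * qed_rdma_init_hw() requires the first RoCE CID to be even. The CID
 * returned to the caller is the responder's.
 */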
1167int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1168{
1169 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1170 u32 responder_icid;
1171 u32 requester_icid;
1172 int rc;
1173
1174 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1175 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1176 &responder_icid);
1177 if (rc) {
1178 spin_unlock_bh(&p_rdma_info->lock);
1179 return rc;
1180 }
1181
1182 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1183 &requester_icid);
1184
1185 spin_unlock_bh(&p_rdma_info->lock);
1186 if (rc)
1187 goto err;
1188
1189 /* the two icid's should be adjacent */
1190 if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
1192 rc = -EINVAL;
1193 goto err;
1194 }
1195
1196 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1197 p_rdma_info->proto);
1198 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1199 p_rdma_info->proto);
1200
1201 /* If these icids require a new ILT line allocate DMA-able context for
1202 * an ILT page
1203 */
1204 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
1205 if (rc)
1206 goto err;
1207
1208 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
1209 if (rc)
1210 goto err;
1211
1212 *cid = (u16)responder_icid;
1213 return rc;
1214
1215err:
1216 spin_lock_bh(&p_rdma_info->lock);
1217 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
1218 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
1219
1220 spin_unlock_bh(&p_rdma_info->lock);
1221 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1222 "Allocate CID - failed, rc = %d\n", rc);
1223 return rc;
1224}
1225
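/* Offload the responder side of a QP to the FW. A single RDMA_RING_PAGE_SIZE
 * page is allocated here for the IRQ ring and is freed again if the ramrod
 * fails or when the responder is destroyed.
 */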
1226static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1227 struct qed_rdma_qp *qp)
1228{
1229 struct roce_create_qp_resp_ramrod_data *p_ramrod;
1230 struct qed_sp_init_data init_data;
1231 union qed_qm_pq_params qm_params;
1232 enum roce_flavor roce_flavor;
1233 struct qed_spq_entry *p_ent;
1234 u16 physical_queue0 = 0;
1235 int rc;
1236
1237 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1238
1239 /* Allocate DMA-able memory for IRQ */
1240 qp->irq_num_pages = 1;
1241 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1242 RDMA_RING_PAGE_SIZE,
1243 &qp->irq_phys_addr, GFP_KERNEL);
1244 if (!qp->irq) {
1245 rc = -ENOMEM;
1246 DP_NOTICE(p_hwfn,
1247 "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
1248 rc);
1249 return rc;
1250 }
1251
1252 /* Get SPQ entry */
1253 memset(&init_data, 0, sizeof(init_data));
1254 init_data.cid = qp->icid;
1255 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1256 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1257
1258 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
1259 PROTOCOLID_ROCE, &init_data);
1260 if (rc)
1261 goto err;
1262
1263 p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
1264
1265 p_ramrod->flags = 0;
1266
1267 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1268 SET_FIELD(p_ramrod->flags,
1269 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1270
1271 SET_FIELD(p_ramrod->flags,
1272 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1273 qp->incoming_rdma_read_en);
1274
1275 SET_FIELD(p_ramrod->flags,
1276 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1277 qp->incoming_rdma_write_en);
1278
1279 SET_FIELD(p_ramrod->flags,
1280 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1281 qp->incoming_atomic_en);
1282
1283 SET_FIELD(p_ramrod->flags,
1284 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1285 qp->e2e_flow_control_en);
1286
1287 SET_FIELD(p_ramrod->flags,
1288 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
1289
1290 SET_FIELD(p_ramrod->flags,
1291 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
1292 qp->fmr_and_reserved_lkey);
1293
1294 SET_FIELD(p_ramrod->flags,
1295 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1296 qp->min_rnr_nak_timer);
1297
1298 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1299 p_ramrod->traffic_class = qp->traffic_class_tos;
1300 p_ramrod->hop_limit = qp->hop_limit_ttl;
1301 p_ramrod->irq_num_pages = qp->irq_num_pages;
1302 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1303 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1304 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1305 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1306 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
1307 p_ramrod->pd = cpu_to_le16(qp->pd);
1308 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
1309 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
1310 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
1311 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1312 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1313 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1314 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1315 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1316 p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1317 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1318 qp->rq_cq_id);
1319
1320 memset(&qm_params, 0, sizeof(qm_params));
1321 qm_params.roce.qpid = qp->icid >> 1;
1322 physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
1323
1324 p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1325 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1326
1327 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1328 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1329
1330 p_ramrod->udp_src_port = qp->udp_src_port;
1331 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1332 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
1333 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
1334
1335 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1336 qp->stats_queue;
1337
1338 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1339
1340 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
1341 rc, physical_queue0);
1342
1343 if (rc)
1344 goto err;
1345
1346 qp->resp_offloaded = true;
1347
1348 return rc;
1349
1350err:
1351 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
1352 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1353 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1354 qp->irq, qp->irq_phys_addr);
1355
1356 return rc;
1357}
1358
1359static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1360 struct qed_rdma_qp *qp)
1361{
1362 struct roce_create_qp_req_ramrod_data *p_ramrod;
1363 struct qed_sp_init_data init_data;
1364 union qed_qm_pq_params qm_params;
1365 enum roce_flavor roce_flavor;
1366 struct qed_spq_entry *p_ent;
1367 u16 physical_queue0 = 0;
1368 int rc;
1369
1370 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1371
1372 /* Allocate DMA-able memory for ORQ */
1373 qp->orq_num_pages = 1;
1374 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1375 RDMA_RING_PAGE_SIZE,
1376 &qp->orq_phys_addr, GFP_KERNEL);
1377 if (!qp->orq) {
1378 rc = -ENOMEM;
1379 DP_NOTICE(p_hwfn,
1380 "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
1381 rc);
1382 return rc;
1383 }
1384
1385 /* Get SPQ entry */
1386 memset(&init_data, 0, sizeof(init_data));
1387 init_data.cid = qp->icid + 1;
1388 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1389 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1390
1391 rc = qed_sp_init_request(p_hwfn, &p_ent,
1392 ROCE_RAMROD_CREATE_QP,
1393 PROTOCOLID_ROCE, &init_data);
1394 if (rc)
1395 goto err;
1396
1397 p_ramrod = &p_ent->ramrod.roce_create_qp_req;
1398
1399 p_ramrod->flags = 0;
1400
1401 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1402 SET_FIELD(p_ramrod->flags,
1403 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1404
1405 SET_FIELD(p_ramrod->flags,
1406 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
1407 qp->fmr_and_reserved_lkey);
1408
1409 SET_FIELD(p_ramrod->flags,
1410 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
1411
1412 SET_FIELD(p_ramrod->flags,
1413 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1414
1415 SET_FIELD(p_ramrod->flags,
1416 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1417 qp->rnr_retry_cnt);
1418
1419 p_ramrod->max_ord = qp->max_rd_atomic_req;
1420 p_ramrod->traffic_class = qp->traffic_class_tos;
1421 p_ramrod->hop_limit = qp->hop_limit_ttl;
1422 p_ramrod->orq_num_pages = qp->orq_num_pages;
1423 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1424 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1425 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1426 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1427 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1428 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
1429 p_ramrod->pd = cpu_to_le16(qp->pd);
1430 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
1431 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
1432 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
1433 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1434 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1435 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1436 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1437 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1438 p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1439 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1440 qp->sq_cq_id);
1441
1442 memset(&qm_params, 0, sizeof(qm_params));
1443 qm_params.roce.qpid = qp->icid >> 1;
1444 physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
1445
1446 p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1447 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1448
1449 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1450 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1451
1452 p_ramrod->udp_src_port = qp->udp_src_port;
1453 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1454 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1455 qp->stats_queue;
1456
1457 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1458
1459 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1460
1461 if (rc)
1462 goto err;
1463
1464 qp->req_offloaded = true;
1465
1466 return rc;
1467
1468err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
1470 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1471 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1472 qp->orq, qp->orq_phys_addr);
1473 return rc;
1474}
1475
1476static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
1477 struct qed_rdma_qp *qp,
1478 bool move_to_err, u32 modify_flags)
1479{
1480 struct roce_modify_qp_resp_ramrod_data *p_ramrod;
1481 struct qed_sp_init_data init_data;
1482 struct qed_spq_entry *p_ent;
1483 int rc;
1484
1485 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1486
1487 if (move_to_err && !qp->resp_offloaded)
1488 return 0;
1489
1490 /* Get SPQ entry */
1491 memset(&init_data, 0, sizeof(init_data));
1492 init_data.cid = qp->icid;
1493 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1494 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1495
1496 rc = qed_sp_init_request(p_hwfn, &p_ent,
1497 ROCE_EVENT_MODIFY_QP,
1498 PROTOCOLID_ROCE, &init_data);
1499 if (rc) {
1500 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1501 return rc;
1502 }
1503
1504 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
1505
1506 p_ramrod->flags = 0;
1507
1508 SET_FIELD(p_ramrod->flags,
1509 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1510
1511 SET_FIELD(p_ramrod->flags,
1512 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1513 qp->incoming_rdma_read_en);
1514
1515 SET_FIELD(p_ramrod->flags,
1516 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1517 qp->incoming_rdma_write_en);
1518
1519 SET_FIELD(p_ramrod->flags,
1520 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1521 qp->incoming_atomic_en);
1522
1523 SET_FIELD(p_ramrod->flags,
1524 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1525 qp->e2e_flow_control_en);
1526
1527 SET_FIELD(p_ramrod->flags,
1528 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
1529 GET_FIELD(modify_flags,
1530 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
1531
1532 SET_FIELD(p_ramrod->flags,
1533 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
1534 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1535
1536 SET_FIELD(p_ramrod->flags,
1537 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1538 GET_FIELD(modify_flags,
1539 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1540
1541 SET_FIELD(p_ramrod->flags,
1542 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
1543 GET_FIELD(modify_flags,
1544 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
1545
1546 SET_FIELD(p_ramrod->flags,
1547 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
1548 GET_FIELD(modify_flags,
1549 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
1550
1551 p_ramrod->fields = 0;
1552 SET_FIELD(p_ramrod->fields,
1553 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1554 qp->min_rnr_nak_timer);
1555
1556 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1557 p_ramrod->traffic_class = qp->traffic_class_tos;
1558 p_ramrod->hop_limit = qp->hop_limit_ttl;
1559 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1560 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1561 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1562 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1563 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1564
1565 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
1566 return rc;
1567}
1568
1569static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
1570 struct qed_rdma_qp *qp,
1571 bool move_to_sqd,
1572 bool move_to_err, u32 modify_flags)
1573{
1574 struct roce_modify_qp_req_ramrod_data *p_ramrod;
1575 struct qed_sp_init_data init_data;
1576 struct qed_spq_entry *p_ent;
1577 int rc;
1578
1579 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1580
1581 if (move_to_err && !(qp->req_offloaded))
1582 return 0;
1583
1584 /* Get SPQ entry */
1585 memset(&init_data, 0, sizeof(init_data));
1586 init_data.cid = qp->icid + 1;
1587 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1588 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1589
1590 rc = qed_sp_init_request(p_hwfn, &p_ent,
1591 ROCE_EVENT_MODIFY_QP,
1592 PROTOCOLID_ROCE, &init_data);
1593 if (rc) {
1594 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1595 return rc;
1596 }
1597
1598 p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
1599
1600 p_ramrod->flags = 0;
1601
1602 SET_FIELD(p_ramrod->flags,
1603 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1604
1605 SET_FIELD(p_ramrod->flags,
1606 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
1607
1608 SET_FIELD(p_ramrod->flags,
1609 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
1610 qp->sqd_async);
1611
1612 SET_FIELD(p_ramrod->flags,
1613 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
1614 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1615
1616 SET_FIELD(p_ramrod->flags,
1617 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1618 GET_FIELD(modify_flags,
1619 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1620
1621 SET_FIELD(p_ramrod->flags,
1622 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
1623 GET_FIELD(modify_flags,
1624 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
1625
1626 SET_FIELD(p_ramrod->flags,
1627 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
1628 GET_FIELD(modify_flags,
1629 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
1630
1631 SET_FIELD(p_ramrod->flags,
1632 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
1633 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
1634
1635 SET_FIELD(p_ramrod->flags,
1636 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
1637 GET_FIELD(modify_flags,
1638 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
1639
1640 p_ramrod->fields = 0;
1641 SET_FIELD(p_ramrod->fields,
1642 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1643
1644 SET_FIELD(p_ramrod->fields,
1645 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1646 qp->rnr_retry_cnt);
1647
1648 p_ramrod->max_ord = qp->max_rd_atomic_req;
1649 p_ramrod->traffic_class = qp->traffic_class_tos;
1650 p_ramrod->hop_limit = qp->hop_limit_ttl;
1651 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1652 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1653 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1654 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1655 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1656 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1657
1658 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
1659 return rc;
1660}
1661
1662static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1663 struct qed_rdma_qp *qp,
1664 u32 *num_invalidated_mw)
1665{
1666 struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1667 struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
1668 struct qed_sp_init_data init_data;
1669 struct qed_spq_entry *p_ent;
1670 dma_addr_t ramrod_res_phys;
1671 int rc;
1672
1673 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1674
1675 if (!qp->resp_offloaded)
1676 return 0;
1677
1678 /* Get SPQ entry */
1679 memset(&init_data, 0, sizeof(init_data));
1680 init_data.cid = qp->icid;
1681 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1682 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1683
1684 rc = qed_sp_init_request(p_hwfn, &p_ent,
1685 ROCE_RAMROD_DESTROY_QP,
1686 PROTOCOLID_ROCE, &init_data);
1687 if (rc)
1688 return rc;
1689
1690 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
1691
1692 p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
1693 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1694 &ramrod_res_phys, GFP_KERNEL);
1695
1696 if (!p_ramrod_res) {
1697 rc = -ENOMEM;
1698 DP_NOTICE(p_hwfn,
1699 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
1700 rc);
1701 return rc;
1702 }
1703
1704 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1705
1706 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1707 if (rc)
1708 goto err;
1709
1710 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
1711
1712 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
1713 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1714 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1715 qp->irq, qp->irq_phys_addr);
1716
1717 qp->resp_offloaded = false;
1718
1719 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
1720
1721err:
1722 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1723 sizeof(struct roce_destroy_qp_resp_output_params),
1724 p_ramrod_res, ramrod_res_phys);
1725
1726 return rc;
1727}
1728
1729static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
1730 struct qed_rdma_qp *qp,
1731 u32 *num_bound_mw)
1732{
1733 struct roce_destroy_qp_req_output_params *p_ramrod_res;
1734 struct roce_destroy_qp_req_ramrod_data *p_ramrod;
1735 struct qed_sp_init_data init_data;
1736 struct qed_spq_entry *p_ent;
1737 dma_addr_t ramrod_res_phys;
1738 int rc = -ENOMEM;
1739
1740 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1741
1742 if (!qp->req_offloaded)
1743 return 0;
1744
1745 p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
1746 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1747 sizeof(*p_ramrod_res),
1748 &ramrod_res_phys, GFP_KERNEL);
1749 if (!p_ramrod_res) {
1750 DP_NOTICE(p_hwfn,
1751 "qed destroy requester failed: cannot allocate memory (ramrod)\n");
1752 return rc;
1753 }
1754
1755 /* Get SPQ entry */
1756 memset(&init_data, 0, sizeof(init_data));
1757 init_data.cid = qp->icid + 1;
1758 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1759 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1760
1761 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
1762 PROTOCOLID_ROCE, &init_data);
1763 if (rc)
1764 goto err;
1765
1766 p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
1767 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1768
1769 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1770 if (rc)
1771 goto err;
1772
1773 *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
1774
1775	/* Only free the ORQ if the ramrod succeeded; FW may still use it */
1776 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1777 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1778 qp->orq, qp->orq_phys_addr);
1779
1780 qp->req_offloaded = false;
1781
1782 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
1783
1784err:
1785 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1786 p_ramrod_res, ramrod_res_phys);
1787
1788 return rc;
1789}
1790
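/* Query a QP's state from the firmware. The query is split in two: a
 * responder ramrod on qp->icid returns the RQ PSN and error flag, and a
 * requester ramrod on qp->icid + 1 returns the SQ PSN, error and draining
 * flags. If neither side was offloaded yet, the driver-cached values are
 * returned instead.
 */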
1791int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1792 struct qed_rdma_qp *qp,
1793 struct qed_rdma_query_qp_out_params *out_params)
1794{
1795 struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1796 struct roce_query_qp_req_output_params *p_req_ramrod_res;
1797 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
1798 struct roce_query_qp_req_ramrod_data *p_req_ramrod;
1799 struct qed_sp_init_data init_data;
1800 dma_addr_t resp_ramrod_res_phys;
1801 dma_addr_t req_ramrod_res_phys;
1802 struct qed_spq_entry *p_ent;
1803 bool rq_err_state;
1804 bool sq_err_state;
1805 bool sq_draining;
1806 int rc = -ENOMEM;
1807
1808 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
1809 /* We can't send ramrod to the fw since this qp wasn't offloaded
1810 * to the fw yet
1811 */
1812 out_params->draining = false;
1813 out_params->rq_psn = qp->rq_psn;
1814 out_params->sq_psn = qp->sq_psn;
1815 out_params->state = qp->cur_state;
1816
1817		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP not offloaded; returning cached state\n");
1818 return 0;
1819 }
1820
1821 if (!(qp->resp_offloaded)) {
1822 DP_NOTICE(p_hwfn,
1823			  "The responder's QP should be offloaded before the requester's\n");
1824 return -EINVAL;
1825 }
1826
1827 /* Send a query responder ramrod to FW to get RQ-PSN and state */
1828 p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
1829 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1830 sizeof(*p_resp_ramrod_res),
1831 &resp_ramrod_res_phys, GFP_KERNEL);
1832 if (!p_resp_ramrod_res) {
1833 DP_NOTICE(p_hwfn,
1834 "qed query qp failed: cannot allocate memory (ramrod)\n");
1835 return rc;
1836 }
1837
1838 /* Get SPQ entry */
1839 memset(&init_data, 0, sizeof(init_data));
1840 init_data.cid = qp->icid;
1841 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1842 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1843 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1844 PROTOCOLID_ROCE, &init_data);
1845 if (rc)
1846 goto err_resp;
1847
1848 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
1849 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
1850
1851 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1852 if (rc)
1853 goto err_resp;
1854
1855	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
1856	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
1857				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
1858
1859	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1860			  p_resp_ramrod_res, resp_ramrod_res_phys);
1861
1862 if (!(qp->req_offloaded)) {
1863 /* Don't send query qp for the requester */
1864 out_params->sq_psn = qp->sq_psn;
1865 out_params->draining = false;
1866
1867 if (rq_err_state)
1868 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1869
1870 out_params->state = qp->cur_state;
1871
1872 return 0;
1873 }
1874
1875 /* Send a query requester ramrod to FW to get SQ-PSN and state */
1876 p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
1877 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1878 sizeof(*p_req_ramrod_res),
1879 &req_ramrod_res_phys,
1880 GFP_KERNEL);
1881 if (!p_req_ramrod_res) {
1882 rc = -ENOMEM;
1883 DP_NOTICE(p_hwfn,
1884 "qed query qp failed: cannot allocate memory (ramrod)\n");
1885 return rc;
1886 }
1887
1888 /* Get SPQ entry */
1889 init_data.cid = qp->icid + 1;
1890 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1891 PROTOCOLID_ROCE, &init_data);
1892 if (rc)
1893 goto err_req;
1894
1895 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
1896 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
1897
1898 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1899 if (rc)
1900 goto err_req;
1901
1902	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
1903	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1904				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
1905	sq_draining =
1906	    GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1907		      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
1908
1909	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1910			  p_req_ramrod_res, req_ramrod_res_phys);
1911
1912 out_params->draining = false;
1913
1914 if (rq_err_state)
1915 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1916 else if (sq_err_state)
1917 qp->cur_state = QED_ROCE_QP_STATE_SQE;
1918 else if (sq_draining)
1919 out_params->draining = true;
1920 out_params->state = qp->cur_state;
1921
1922 return 0;
1923
1924err_req:
1925 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1926 p_req_ramrod_res, req_ramrod_res_phys);
1927 return rc;
1928err_resp:
1929 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1930 p_resp_ramrod_res, resp_ramrod_res_phys);
1931 return rc;
1932}
1933
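/* Destroy an offloaded QP. The QP must be in RESET, ERROR or INIT state;
 * both the responder and requester destroy ramrods are sent, the invalidated
 * vs. bound memory-window counters are cross-checked, and the two ICIDs are
 * released back to the CID bitmap.
 */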
1934int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1935{
1936 u32 num_invalidated_mw = 0;
1937 u32 num_bound_mw = 0;
1938 u32 start_cid;
1939 int rc;
1940
1941 /* Destroys the specified QP */
1942 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
1943 (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
1944 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
1945 DP_NOTICE(p_hwfn,
1946 "QP must be in error, reset or init state before destroying it\n");
1947 return -EINVAL;
1948 }
1949
1950 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
1951 if (rc)
1952 return rc;
1953
1954 /* Send destroy requester ramrod */
1955 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
1956 if (rc)
1957 return rc;
1958
1959 if (num_invalidated_mw != num_bound_mw) {
1960 DP_NOTICE(p_hwfn,
1961			  "number of invalidated memory windows does not match the number of bound ones\n");
1962 return -EINVAL;
1963 }
1964
1965 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1966
1967 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1968 p_hwfn->p_rdma_info->proto);
1969
1970 /* Release responder's icid */
1971 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
1972 qp->icid - start_cid);
1973
1974 /* Release requester's icid */
1975 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
1976 qp->icid + 1 - start_cid);
1977
1978 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1979
1980 return 0;
1981}
1982
1983static int qed_rdma_query_qp(void *rdma_cxt,
1984 struct qed_rdma_qp *qp,
1985 struct qed_rdma_query_qp_out_params *out_params)
1986{
1987 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1988 int rc;
1989
1990 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1991
1992 /* The following fields are filled in from qp and not FW as they can't
1993 * be modified by FW
1994 */
1995 out_params->mtu = qp->mtu;
1996 out_params->dest_qp = qp->dest_qp;
1997 out_params->incoming_atomic_en = qp->incoming_atomic_en;
1998 out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
1999 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
2000 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
2001 out_params->dgid = qp->dgid;
2002 out_params->flow_label = qp->flow_label;
2003 out_params->hop_limit_ttl = qp->hop_limit_ttl;
2004 out_params->traffic_class_tos = qp->traffic_class_tos;
2005 out_params->timeout = qp->ack_timeout;
2006 out_params->rnr_retry = qp->rnr_retry_cnt;
2007 out_params->retry_cnt = qp->retry_cnt;
2008 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
2009 out_params->pkey_index = 0;
2010 out_params->max_rd_atomic = qp->max_rd_atomic_req;
2011 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
2012 out_params->sqd_async = qp->sqd_async;
2013
2014 rc = qed_roce_query_qp(p_hwfn, qp, out_params);
2015
2016 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
2017 return rc;
2018}
2019
2020static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2021{
2022 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2023 int rc = 0;
2024
2025 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2026
2027 rc = qed_roce_destroy_qp(p_hwfn, qp);
2028
2029 /* free qp params struct */
2030 kfree(qp);
2031
2032 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
2033 return rc;
2034}
2035
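/* Allocate and initialize a driver QP object. This only reserves the QP's
 * ICID (the requester side will use icid + 1) and caches the creation
 * parameters; nothing is offloaded to the firmware until the QP is modified
 * out of the RESET state.
 */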
2036struct qed_rdma_qp *
2037qed_rdma_create_qp(void *rdma_cxt,
2038 struct qed_rdma_create_qp_in_params *in_params,
2039 struct qed_rdma_create_qp_out_params *out_params)
2040{
2041 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2042 struct qed_rdma_qp *qp;
2043 u8 max_stats_queues;
2044 int rc;
2045
2046 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
2047 DP_ERR(p_hwfn->cdev,
2048		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
2049 rdma_cxt, in_params, out_params);
2050 return NULL;
2051 }
2052
2053 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2054 "qed rdma create qp called with qp_handle = %08x%08x\n",
2055 in_params->qp_handle_hi, in_params->qp_handle_lo);
2056
2057 /* Some sanity checks... */
2058 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
2059 if (in_params->stats_queue >= max_stats_queues) {
2060 DP_ERR(p_hwfn->cdev,
2061 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
2062 in_params->stats_queue, max_stats_queues);
2063 return NULL;
2064 }
2065
2066 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2067 if (!qp) {
2068 DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
2069 return NULL;
2070 }
2071
2072	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
2073	if (rc) {
2074		kfree(qp);
2075		return NULL;
2076	}
2077
2078	qp->qpid = ((0xFF << 16) | qp->icid);
2079
2080	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
2081
2082 qp->cur_state = QED_ROCE_QP_STATE_RESET;
2083 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
2084 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
2085 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
2086 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
2087 qp->use_srq = in_params->use_srq;
2088 qp->signal_all = in_params->signal_all;
2089 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
2090 qp->pd = in_params->pd;
2091 qp->dpi = in_params->dpi;
2092 qp->sq_cq_id = in_params->sq_cq_id;
2093 qp->sq_num_pages = in_params->sq_num_pages;
2094 qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
2095 qp->rq_cq_id = in_params->rq_cq_id;
2096 qp->rq_num_pages = in_params->rq_num_pages;
2097 qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
2098 qp->srq_id = in_params->srq_id;
2099 qp->req_offloaded = false;
2100 qp->resp_offloaded = false;
2101 qp->e2e_flow_control_en = qp->use_srq ? false : true;
2102 qp->stats_queue = in_params->stats_queue;
2103
2104 out_params->icid = qp->icid;
2105 out_params->qp_id = qp->qpid;
2106
2107 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
2108 return qp;
2109}
2110
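/* Drive the RoCE QP state machine in the firmware. Depending on the previous
 * and new states this sends create, modify or destroy ramrods for the
 * responder and/or requester: e.g. INIT/RESET->RTR creates the responder,
 * RTR->RTS creates the requester, and any state -> RESET tears both down.
 */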
2111static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2112 struct qed_rdma_qp *qp,
2113 enum qed_roce_qp_state prev_state,
2114 struct qed_rdma_modify_qp_in_params *params)
2115{
2116 u32 num_invalidated_mw = 0, num_bound_mw = 0;
2117 int rc = 0;
2118
2119 /* Perform additional operations according to the current state and the
2120 * next state
2121 */
2122 if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
2123 (prev_state == QED_ROCE_QP_STATE_RESET)) &&
2124 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
2125 /* Init->RTR or Reset->RTR */
2126 rc = qed_roce_sp_create_responder(p_hwfn, qp);
2127 return rc;
2128 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
2129 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2130 /* RTR-> RTS */
2131 rc = qed_roce_sp_create_requester(p_hwfn, qp);
2132 if (rc)
2133 return rc;
2134
2135 /* Send modify responder ramrod */
2136 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2137 params->modify_flags);
2138 return rc;
2139 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2140 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2141 /* RTS->RTS */
2142 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2143 params->modify_flags);
2144 if (rc)
2145 return rc;
2146
2147 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2148 params->modify_flags);
2149 return rc;
2150 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2151 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2152 /* RTS->SQD */
2153 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
2154 params->modify_flags);
2155 return rc;
2156 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2157 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2158 /* SQD->SQD */
2159 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2160 params->modify_flags);
2161 if (rc)
2162 return rc;
2163
2164 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2165 params->modify_flags);
2166 return rc;
2167 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2168 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2169 /* SQD->RTS */
2170 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2171 params->modify_flags);
2172 if (rc)
2173 return rc;
2174
2175 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2176 params->modify_flags);
2177
2178 return rc;
2179 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
2180 qp->cur_state == QED_ROCE_QP_STATE_SQE) {
2181 /* ->ERR */
2182 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
2183 params->modify_flags);
2184 if (rc)
2185 return rc;
2186
2187 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
2188 params->modify_flags);
2189 return rc;
2190 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
2191 /* Any state -> RESET */
2192
2193 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
2194 &num_invalidated_mw);
2195 if (rc)
2196 return rc;
2197
2198 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2199 &num_bound_mw);
2200
2201 if (num_invalidated_mw != num_bound_mw) {
2202 DP_NOTICE(p_hwfn,
2203				  "number of invalidated memory windows does not match the number of bound ones\n");
2204 return -EINVAL;
2205 }
2206 } else {
2207		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No ramrod needed for this transition\n");
2208 }
2209
2210 return rc;
2211}
2212
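/* rdma_modify_qp entry point: fold the fields selected by modify_flags into
 * the driver QP structure, update cur_state, and then let
 * qed_roce_modify_qp() issue whatever ramrods the state transition needs.
 */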
2213static int qed_rdma_modify_qp(void *rdma_cxt,
2214 struct qed_rdma_qp *qp,
2215 struct qed_rdma_modify_qp_in_params *params)
2216{
2217 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2218 enum qed_roce_qp_state prev_state;
2219 int rc = 0;
2220
2221 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
2222 qp->icid, params->new_state);
2223
2224 if (rc) {
2225 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2226 return rc;
2227 }
2228
2229 if (GET_FIELD(params->modify_flags,
2230 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
2231 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
2232 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
2233 qp->incoming_atomic_en = params->incoming_atomic_en;
2234 }
2235
2236 /* Update QP structure with the updated values */
2237 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
2238 qp->roce_mode = params->roce_mode;
2239 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
2240 qp->pkey = params->pkey;
2241 if (GET_FIELD(params->modify_flags,
2242 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
2243 qp->e2e_flow_control_en = params->e2e_flow_control_en;
2244 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
2245 qp->dest_qp = params->dest_qp;
2246 if (GET_FIELD(params->modify_flags,
2247 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
2248 /* Indicates that the following parameters have changed:
2249 * Traffic class, flow label, hop limit, source GID,
2250 * destination GID, loopback indicator
2251 */
2252 qp->traffic_class_tos = params->traffic_class_tos;
2253 qp->flow_label = params->flow_label;
2254 qp->hop_limit_ttl = params->hop_limit_ttl;
2255
2256 qp->sgid = params->sgid;
2257 qp->dgid = params->dgid;
2258 qp->udp_src_port = 0;
2259 qp->vlan_id = params->vlan_id;
2260 qp->mtu = params->mtu;
2261 qp->lb_indication = params->lb_indication;
2262 memcpy((u8 *)&qp->remote_mac_addr[0],
2263 (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
2264 if (params->use_local_mac) {
2265 memcpy((u8 *)&qp->local_mac_addr[0],
2266 (u8 *)&params->local_mac_addr[0], ETH_ALEN);
2267 } else {
2268 memcpy((u8 *)&qp->local_mac_addr[0],
2269 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
2270 }
2271 }
2272 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
2273 qp->rq_psn = params->rq_psn;
2274 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
2275 qp->sq_psn = params->sq_psn;
2276 if (GET_FIELD(params->modify_flags,
2277 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
2278 qp->max_rd_atomic_req = params->max_rd_atomic_req;
2279 if (GET_FIELD(params->modify_flags,
2280 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
2281 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
2282 if (GET_FIELD(params->modify_flags,
2283 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
2284 qp->ack_timeout = params->ack_timeout;
2285 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
2286 qp->retry_cnt = params->retry_cnt;
2287 if (GET_FIELD(params->modify_flags,
2288 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
2289 qp->rnr_retry_cnt = params->rnr_retry_cnt;
2290 if (GET_FIELD(params->modify_flags,
2291 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
2292 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
2293
2294 qp->sqd_async = params->sqd_async;
2295
2296 prev_state = qp->cur_state;
2297 if (GET_FIELD(params->modify_flags,
2298 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
2299 qp->cur_state = params->new_state;
2300 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
2301 qp->cur_state);
2302 }
2303
2304 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
2305
2306 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
2307 return rc;
2308}
2309
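/* Register a memory region (TID) with the firmware. The ramrod flags encode
 * the PBL layout, page sizes, access rights and TID type; the registered
 * VA (or FBO for zero-based MRs), the PBL base and the optional DIF
 * addresses are passed as register pairs.
 */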
2310static int
2311qed_rdma_register_tid(void *rdma_cxt,
2312 struct qed_rdma_register_tid_in_params *params)
2313{
2314 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2315 struct rdma_register_tid_ramrod_data *p_ramrod;
2316 struct qed_sp_init_data init_data;
2317 struct qed_spq_entry *p_ent;
2318 enum rdma_tid_type tid_type;
2319 u8 fw_return_code;
2320 int rc;
2321
2322 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
2323
2324 /* Get SPQ entry */
2325 memset(&init_data, 0, sizeof(init_data));
2326 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2327 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2328
2329 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
2330 p_hwfn->p_rdma_info->proto, &init_data);
2331 if (rc) {
2332 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2333 return rc;
2334 }
2335
2336 if (p_hwfn->p_rdma_info->last_tid < params->itid)
2337 p_hwfn->p_rdma_info->last_tid = params->itid;
2338
2339 p_ramrod = &p_ent->ramrod.rdma_register_tid;
2340
2341 p_ramrod->flags = 0;
2342 SET_FIELD(p_ramrod->flags,
2343 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
2344 params->pbl_two_level);
2345
2346 SET_FIELD(p_ramrod->flags,
2347 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
2348
2349 SET_FIELD(p_ramrod->flags,
2350 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
2351
2352 /* Don't initialize D/C field, as it may override other bits. */
2353 if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
2354 SET_FIELD(p_ramrod->flags,
2355 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
2356 params->page_size_log - 12);
2357
2358 SET_FIELD(p_ramrod->flags,
2359 RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
2360 p_hwfn->p_rdma_info->last_tid);
2361
2362 SET_FIELD(p_ramrod->flags,
2363 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
2364 params->remote_read);
2365
2366 SET_FIELD(p_ramrod->flags,
2367 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
2368 params->remote_write);
2369
2370 SET_FIELD(p_ramrod->flags,
2371 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
2372 params->remote_atomic);
2373
2374 SET_FIELD(p_ramrod->flags,
2375 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
2376 params->local_write);
2377
2378 SET_FIELD(p_ramrod->flags,
2379 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
2380
2381 SET_FIELD(p_ramrod->flags,
2382 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
2383 params->mw_bind);
2384
2385 SET_FIELD(p_ramrod->flags1,
2386 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
2387 params->pbl_page_size_log - 12);
2388
2389 SET_FIELD(p_ramrod->flags2,
2390 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
2391
2392 switch (params->tid_type) {
2393 case QED_RDMA_TID_REGISTERED_MR:
2394 tid_type = RDMA_TID_REGISTERED_MR;
2395 break;
2396 case QED_RDMA_TID_FMR:
2397 tid_type = RDMA_TID_FMR;
2398 break;
2399 case QED_RDMA_TID_MW_TYPE1:
2400 tid_type = RDMA_TID_MW_TYPE1;
2401 break;
2402 case QED_RDMA_TID_MW_TYPE2A:
2403 tid_type = RDMA_TID_MW_TYPE2A;
2404 break;
2405 default:
2406 rc = -EINVAL;
2407 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2408 return rc;
2409 }
2410 SET_FIELD(p_ramrod->flags1,
2411 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
2412
2413 p_ramrod->itid = cpu_to_le32(params->itid);
2414 p_ramrod->key = params->key;
2415 p_ramrod->pd = cpu_to_le16(params->pd);
2416 p_ramrod->length_hi = (u8)(params->length >> 32);
2417 p_ramrod->length_lo = DMA_LO_LE(params->length);
2418 if (params->zbva) {
2419 /* Lower 32 bits of the registered MR address.
2420 * In case of zero based MR, will hold FBO
2421 */
2422 p_ramrod->va.hi = 0;
2423 p_ramrod->va.lo = cpu_to_le32(params->fbo);
2424 } else {
2425 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
2426 }
2427 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
2428
2429 /* DIF */
2430 if (params->dif_enabled) {
2431 SET_FIELD(p_ramrod->flags2,
2432 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
2433 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
2434 params->dif_error_addr);
2435 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
2436 }
2437
2438	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;
2439
2440	if (fw_return_code != RDMA_RETURN_OK) {
2441 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2442 return -EINVAL;
2443 }
2444
2445 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
2446 return rc;
2447}
2448
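/* Deregister a TID. If the firmware reports the TID is still in use
 * (RDMA_RETURN_NIG_DRAIN_REQ), drain the NIG through the MCP and resend the
 * ramrod once before giving up.
 */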
2449static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2450{
2451 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2452 struct rdma_deregister_tid_ramrod_data *p_ramrod;
2453 struct qed_sp_init_data init_data;
2454 struct qed_spq_entry *p_ent;
2455 struct qed_ptt *p_ptt;
2456 u8 fw_return_code;
2457 int rc;
2458
2459 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
2460
2461 /* Get SPQ entry */
2462 memset(&init_data, 0, sizeof(init_data));
2463 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2464 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2465
2466 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
2467 p_hwfn->p_rdma_info->proto, &init_data);
2468 if (rc) {
2469 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2470 return rc;
2471 }
2472
2473 p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
2474 p_ramrod->itid = cpu_to_le32(itid);
2475
2476 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2477 if (rc) {
2478 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2479 return rc;
2480 }
2481
2482 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
2483 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2484 return -EINVAL;
2485 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
2486 /* Bit indicating that the TID is in use and a nig drain is
2487 * required before sending the ramrod again
2488 */
2489 p_ptt = qed_ptt_acquire(p_hwfn);
2490 if (!p_ptt) {
2491 rc = -EBUSY;
2492 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2493 "Failed to acquire PTT\n");
2494 return rc;
2495 }
2496
2497 rc = qed_mcp_drain(p_hwfn, p_ptt);
2498 if (rc) {
2499 qed_ptt_release(p_hwfn, p_ptt);
2500 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2501 "Drain failed\n");
2502 return rc;
2503 }
2504
2505 qed_ptt_release(p_hwfn, p_ptt);
2506
2507 /* Resend the ramrod */
2508 rc = qed_sp_init_request(p_hwfn, &p_ent,
2509 RDMA_RAMROD_DEREGISTER_MR,
2510 p_hwfn->p_rdma_info->proto,
2511 &init_data);
2512 if (rc) {
2513 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2514 "Failed to init sp-element\n");
2515 return rc;
2516 }
2517
2518 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2519 if (rc) {
2520 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2521 "Ramrod failed\n");
2522 return rc;
2523 }
2524
2525 if (fw_return_code != RDMA_RETURN_OK) {
2526 DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
2527 fw_return_code);
2528			return -EINVAL;
2529 }
2530 }
2531
2532 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
2533 return rc;
2534}
2535
2536static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
2537{
2538 return QED_LEADING_HWFN(cdev);
2539}
2540
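/* Program the per-PF DPM enable bit in the DORQ block; it is kept cleared
 * whenever DCBx or the doorbell-BAR configuration forbids EDPM.
 */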
2541static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2542{
2543 u32 val;
2544
2545 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
2546
2547 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
2548 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
2549 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
2550 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
2551}
2552
2553void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2554{
2555 p_hwfn->db_bar_no_edpm = true;
2556
2557 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2558}
2559
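/* Bring up the RDMA engine on this hwfn: acquire a PTT, allocate the RDMA
 * resources and run the setup sequence, unwinding the allocation if any step
 * fails.
 */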
2560static int qed_rdma_start(void *rdma_cxt,
2561 struct qed_rdma_start_in_params *params)
2562{
2563 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2564 struct qed_ptt *p_ptt;
2565 int rc = -EBUSY;
2566
2567 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2568 "desired_cnq = %08x\n", params->desired_cnq);
2569
2570 p_ptt = qed_ptt_acquire(p_hwfn);
2571 if (!p_ptt)
2572 goto err;
2573
2574 rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
2575 if (rc)
2576 goto err1;
2577
2578 rc = qed_rdma_setup(p_hwfn, p_ptt, params);
2579 if (rc)
2580 goto err2;
2581
2582 qed_ptt_release(p_hwfn, p_ptt);
2583
2584 return rc;
2585
2586err2:
2587 qed_rdma_free(p_hwfn);
2588err1:
2589 qed_ptt_release(p_hwfn, p_ptt);
2590err:
2591 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
2592 return rc;
2593}
2594
2595static int qed_rdma_init(struct qed_dev *cdev,
2596 struct qed_rdma_start_in_params *params)
2597{
2598 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2599}
2600
2601static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
2602{
2603 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2604
2605 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
2606
2607 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2608 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
2609 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2610}
2611
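/* LL2 GSI completion callbacks: hand completed TX and RX packets back to the
 * RoCE upper layer through the callbacks registered in qed_roce_ll2_start().
 */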
2612void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
2613 u8 connection_handle,
2614 void *cookie,
2615 dma_addr_t first_frag_addr,
2616 bool b_last_fragment, bool b_last_packet)
2617{
2618 struct qed_roce_ll2_packet *packet = cookie;
2619 struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
2620
2621 roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
2622}
2623
2624void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
2625 u8 connection_handle,
2626 void *cookie,
2627 dma_addr_t first_frag_addr,
2628 bool b_last_fragment, bool b_last_packet)
2629{
2630 qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
2631 cookie, first_frag_addr,
2632 b_last_fragment, b_last_packet);
2633}
2634
2635void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
2636 u8 connection_handle,
2637 void *cookie,
2638 dma_addr_t rx_buf_addr,
2639 u16 data_length,
2640 u8 data_length_error,
2641 u16 parse_flags,
2642 u16 vlan,
2643 u32 src_mac_addr_hi,
2644 u16 src_mac_addr_lo, bool b_last_packet)
2645{
2646 struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
2647 struct qed_roce_ll2_rx_params params;
2648 struct qed_dev *cdev = p_hwfn->cdev;
2649 struct qed_roce_ll2_packet pkt;
2650
2651 DP_VERBOSE(cdev,
2652 QED_MSG_LL2,
2653 "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
2654 (void *)(uintptr_t)rx_buf_addr,
2655 data_length, data_length_error);
2656
2657 memset(&pkt, 0, sizeof(pkt));
2658 pkt.n_seg = 1;
2659 pkt.payload[0].baddr = rx_buf_addr;
2660 pkt.payload[0].len = data_length;
2661
2662 memset(&params, 0, sizeof(params));
2663 params.vlan_id = vlan;
2664 *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
2665 *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
2666
2667 if (data_length_error) {
2668 DP_ERR(cdev,
2669 "roce ll2 rx complete: data length error %d, length=%d\n",
2670 data_length_error, data_length);
2671 params.rc = -EINVAL;
2672 }
2673
2674 roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
2675}
2676
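/* Swap the LL2 unicast MAC filter used for RoCE traffic: the old address (if
 * any) is removed and the new one (if any) is added, under the ll2 lock.
 */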
2677static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
2678 u8 *old_mac_address,
2679 u8 *new_mac_address)
2680{
2681 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2682 struct qed_ptt *p_ptt;
2683 int rc = 0;
2684
2685 if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
2686 DP_ERR(cdev,
2687 "qed roce mac filter failed - roce_info/ll2 NULL\n");
2688 return -EINVAL;
2689 }
2690
2691 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2692 if (!p_ptt) {
2693 DP_ERR(cdev,
2694 "qed roce ll2 mac filter set: failed to acquire PTT\n");
2695 return -EINVAL;
2696 }
2697
2698 mutex_lock(&hwfn->ll2->lock);
2699 if (old_mac_address)
2700 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2701 old_mac_address);
2702 if (new_mac_address)
2703 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2704 new_mac_address);
2705 mutex_unlock(&hwfn->ll2->lock);
2706
2707 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2708
2709 if (rc)
2710 DP_ERR(cdev,
2711 "qed roce ll2 mac filter set: failed to add mac filter\n");
2712
2713 return rc;
2714}
2715
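/* Start the light-L2 connection used for RoCE CM/GSI traffic: allocate the
 * qed_roce_ll2_info context, acquire and establish an LL2 connection of type
 * QED_LL2_TYPE_ROCE, and install the caller's MAC address filter.
 */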
2716static int qed_roce_ll2_start(struct qed_dev *cdev,
2717 struct qed_roce_ll2_params *params)
2718{
2719 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2720 struct qed_roce_ll2_info *roce_ll2;
2721 struct qed_ll2_info ll2_params;
2722 int rc;
2723
2724 if (!params) {
2725 DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
2726 return -EINVAL;
2727 }
2728 if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
2729 DP_ERR(cdev,
2730 "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
2731 params->cbs.tx_cb, params->cbs.rx_cb);
2732 return -EINVAL;
2733 }
2734 if (!is_valid_ether_addr(params->mac_address)) {
2735 DP_ERR(cdev,
2736 "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
2737 params->mac_address);
2738 return -EINVAL;
2739 }
2740
2741 /* Initialize */
2742 roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
2743 if (!roce_ll2) {
2744 DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
2745 return -ENOMEM;
2746 }
2747 memset(roce_ll2, 0, sizeof(*roce_ll2));
2748 roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
2749 roce_ll2->cbs = params->cbs;
2750 roce_ll2->cb_cookie = params->cb_cookie;
2751 mutex_init(&roce_ll2->lock);
2752
2753 memset(&ll2_params, 0, sizeof(ll2_params));
2754 ll2_params.conn_type = QED_LL2_TYPE_ROCE;
2755 ll2_params.mtu = params->mtu;
2756 ll2_params.rx_drop_ttl0_flg = true;
2757 ll2_params.rx_vlan_removal_en = false;
2758 ll2_params.tx_dest = CORE_TX_DEST_NW;
2759 ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
2760 ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
2761 ll2_params.gsi_enable = true;
2762
2763 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
2764 params->max_rx_buffers,
2765 params->max_tx_buffers,
2766 &roce_ll2->handle);
2767 if (rc) {
2768 DP_ERR(cdev,
2769 "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
2770 rc);
2771 goto err;
2772 }
2773
2774 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2775 roce_ll2->handle);
2776 if (rc) {
2777 DP_ERR(cdev,
2778 "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
2779 rc);
2780 goto err1;
2781 }
2782
2783 hwfn->ll2 = roce_ll2;
2784
2785 rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
2786 if (rc) {
2787 hwfn->ll2 = NULL;
2788 goto err2;
2789 }
2790 ether_addr_copy(roce_ll2->mac_address, params->mac_address);
2791
2792 return 0;
2793
2794err2:
2795 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2796err1:
2797 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2798err:
2799 kfree(roce_ll2);
2800 return rc;
2801}
2802
2803static int qed_roce_ll2_stop(struct qed_dev *cdev)
2804{
2805 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2806 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2807 int rc;
2808
2809	if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
2810 DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
2811 return -EINVAL;
2812 }
2813
2814 /* remove LL2 MAC address filter */
2815 rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
2816 eth_zero_addr(roce_ll2->mac_address);
2817
2818 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2819 roce_ll2->handle);
2820 if (rc)
2821 DP_ERR(cdev,
2822 "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
2823 rc);
2824
2825 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2826
2827 roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
2828
2829 kfree(roce_ll2);
2830
2831 return rc;
2832}
2833
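/* Transmit a RoCE CM/GSI packet over LL2: the header fragment is posted
 * first with the appropriate RoCE flavor (and IP checksum offload for
 * RoCE v2 over IPv4), followed by one LL2 fragment per payload segment.
 */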
2834static int qed_roce_ll2_tx(struct qed_dev *cdev,
2835 struct qed_roce_ll2_packet *pkt,
2836 struct qed_roce_ll2_tx_params *params)
2837{
2838 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2839 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2840 enum qed_ll2_roce_flavor_type qed_roce_flavor;
2841 u8 flags = 0;
2842 int rc;
2843 int i;
2844
2845	if (!pkt || !params) {
2846		DP_ERR(cdev,
2847 "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
2848 cdev, pkt, params);
2849 return -EINVAL;
2850 }
2851
2852 qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
2853 : QED_LL2_RROCE;
2854
2855 if (pkt->roce_mode == ROCE_V2_IPV4)
2856 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
2857
2858 /* Tx header */
2859 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
2860 1 + pkt->n_seg, 0, flags, 0,
2861 qed_roce_flavor, pkt->header.baddr,
2862 pkt->header.len, pkt, 1);
2863 if (rc) {
2864 DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
2865 return QED_ROCE_TX_HEAD_FAILURE;
2866 }
2867
2868 /* Tx payload */
2869 for (i = 0; i < pkt->n_seg; i++) {
2870 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2871 roce_ll2->handle,
2872 pkt->payload[i].baddr,
2873 pkt->payload[i].len);
2874 if (rc) {
2875			/* If this fails there is not much we can do: part of
2876			 * the packet has already been posted and we cannot
2877			 * free the memory, so wait for the completion.
2878			 */
2879 DP_ERR(cdev,
2880 "roce ll2 tx: payload failed (rc=%d)\n", rc);
2881 return QED_ROCE_TX_FRAG_FAILURE;
2882 }
2883 }
2884
2885 return 0;
2886}
2887
2888static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
2889 struct qed_roce_ll2_buffer *buf,
2890 u64 cookie, u8 notify_fw)
2891{
2892 return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2893 QED_LEADING_HWFN(cdev)->ll2->handle,
2894 buf->baddr, buf->len,
2895 (void *)(uintptr_t)cookie, notify_fw);
2896}
2897
2898static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2899{
2900 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2901 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2902
2903 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2904 roce_ll2->handle, stats);
2905}
2906
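/* Operation table handed to the qedr RDMA driver via qed_get_rdma_ops(). */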
2907static const struct qed_rdma_ops qed_rdma_ops_pass = {
2908 .common = &qed_common_ops_pass,
2909 .fill_dev_info = &qed_fill_rdma_dev_info,
2910 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
2911 .rdma_init = &qed_rdma_init,
2912 .rdma_add_user = &qed_rdma_add_user,
2913 .rdma_remove_user = &qed_rdma_remove_user,
2914 .rdma_stop = &qed_rdma_stop,
2915	.rdma_query_port = &qed_rdma_query_port,
2916	.rdma_query_device = &qed_rdma_query_device,
2917 .rdma_get_start_sb = &qed_rdma_get_sb_start,
2918 .rdma_get_rdma_int = &qed_rdma_get_int,
2919 .rdma_set_rdma_int = &qed_rdma_set_int,
2920 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
2921 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
2922	.rdma_alloc_pd = &qed_rdma_alloc_pd,
2923 .rdma_dealloc_pd = &qed_rdma_free_pd,
2924 .rdma_create_cq = &qed_rdma_create_cq,
2925 .rdma_destroy_cq = &qed_rdma_destroy_cq,
2926	.rdma_create_qp = &qed_rdma_create_qp,
2927 .rdma_modify_qp = &qed_rdma_modify_qp,
2928 .rdma_query_qp = &qed_rdma_query_qp,
2929 .rdma_destroy_qp = &qed_rdma_destroy_qp,
2930	.rdma_alloc_tid = &qed_rdma_alloc_tid,
2931 .rdma_free_tid = &qed_rdma_free_tid,
2932 .rdma_register_tid = &qed_rdma_register_tid,
2933 .rdma_deregister_tid = &qed_rdma_deregister_tid,
2934	.roce_ll2_start = &qed_roce_ll2_start,
2935 .roce_ll2_stop = &qed_roce_ll2_stop,
2936 .roce_ll2_tx = &qed_roce_ll2_tx,
2937 .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
2938 .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
2939 .roce_ll2_stats = &qed_roce_ll2_stats,
2940};
2941
2942const struct qed_rdma_ops *qed_get_rdma_ops(void)
2943{
2944 return &qed_rdma_ops_pass;
2945}
2946EXPORT_SYMBOL(qed_get_rdma_ops);