/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

#define MLX5_INVALID_LKEY 0x100
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
#define MLX5_CPY_GRD_MASK 0xc0
#define MLX5_CPY_APP_MASK 0x30
#define MLX5_CPY_REF_MASK 0x0f
#define MLX5_BSF_INC_REFTAG (1 << 6)
#define MLX5_BSF_INL_VALID (1 << 15)
#define MLX5_BSF_REFRESH_DIF (1 << 14)
#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2

#define MLX5_QPN_BITS 24
#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)

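/*
 * Optional-parameter mask bits for QP modify commands; each bit marks a QP
 * context field that is valid for the requested state transition (carried in
 * the opt_param_mask/optparam fields of the mailboxes further below).
 */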
enum mlx5_qp_optpar {
        MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
        MLX5_QP_OPTPAR_RRE = 1 << 1,
        MLX5_QP_OPTPAR_RAE = 1 << 2,
        MLX5_QP_OPTPAR_RWE = 1 << 3,
        MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
        MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
        MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
        MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
        MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
        MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
        MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
        MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
        MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
        MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
        MLX5_QP_OPTPAR_SRQN = 1 << 18,
        MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
        MLX5_QP_OPTPAR_DC_HS = 1 << 20,
        MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
};

enum mlx5_qp_state {
        MLX5_QP_STATE_RST = 0,
        MLX5_QP_STATE_INIT = 1,
        MLX5_QP_STATE_RTR = 2,
        MLX5_QP_STATE_RTS = 3,
        MLX5_QP_STATE_SQER = 4,
        MLX5_QP_STATE_SQD = 5,
        MLX5_QP_STATE_ERR = 6,
        MLX5_QP_STATE_SQ_DRAINING = 7,
        MLX5_QP_STATE_SUSPENDED = 9,
        MLX5_QP_NUM_STATE,
        MLX5_QP_STATE,
        MLX5_QP_STATE_BAD,
};

enum {
        MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
        MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
        MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
        MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
};

enum {
        MLX5_QP_ST_RC = 0x0,
        MLX5_QP_ST_UC = 0x1,
        MLX5_QP_ST_UD = 0x2,
        MLX5_QP_ST_XRC = 0x3,
        MLX5_QP_ST_MLX = 0x4,
        MLX5_QP_ST_DCI = 0x5,
        MLX5_QP_ST_DCT = 0x6,
        MLX5_QP_ST_QP0 = 0x7,
        MLX5_QP_ST_QP1 = 0x8,
        MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
        MLX5_QP_ST_RAW_IPV6 = 0xa,
        MLX5_QP_ST_SNIFFER = 0xb,
        MLX5_QP_ST_SYNC_UMR = 0xe,
        MLX5_QP_ST_PTP_1588 = 0xd,
        MLX5_QP_ST_REG_UMR = 0xc,
        MLX5_QP_ST_MAX
};

enum {
        MLX5_QP_PM_MIGRATED = 0x3,
        MLX5_QP_PM_ARMED = 0x0,
        MLX5_QP_PM_REARM = 0x1
};

enum {
        MLX5_NON_ZERO_RQ = 0 << 24,
        MLX5_SRQ_RQ = 1 << 24,
        MLX5_CRQ_RQ = 2 << 24,
        MLX5_ZERO_LEN_RQ = 3 << 24
};

enum {
        /* params1 */
        MLX5_QP_BIT_SRE = 1 << 15,
        MLX5_QP_BIT_SWE = 1 << 14,
        MLX5_QP_BIT_SAE = 1 << 13,
        /* params2 */
        MLX5_QP_BIT_RRE = 1 << 15,
        MLX5_QP_BIT_RWE = 1 << 14,
        MLX5_QP_BIT_RAE = 1 << 13,
        MLX5_QP_BIT_RIC = 1 << 4,
        MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2,
        MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1,
        MLX5_QP_BIT_CC_MASTER = 1 << 0
};

enum {
        MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
        MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
        MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};

enum {
        MLX5_SEND_WQE_DS = 16,
        MLX5_SEND_WQE_BB = 64,
};

#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)

enum {
        MLX5_SEND_WQE_MAX_WQEBBS = 16,
};

enum {
        MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
        MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
        MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
        MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
        MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
};

enum {
        MLX5_FENCE_MODE_NONE = 0 << 5,
        MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
        MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
        MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};

enum {
        MLX5_QP_LAT_SENSITIVE = 1 << 28,
        MLX5_QP_BLOCK_MCAST = 1 << 30,
        MLX5_QP_ENABLE_SIG = 1 << 31,
};

enum {
        MLX5_RCV_DBR = 0,
        MLX5_SND_DBR = 1,
};

enum {
        MLX5_FLAGS_INLINE = 1 << 7,
        MLX5_FLAGS_CHECK_FREE = 1 << 5,
};

struct mlx5_wqe_fmr_seg {
        __be32 flags;
        __be32 mem_key;
        __be64 buf_list;
        __be64 start_addr;
        __be64 reg_len;
        __be32 offset;
        __be32 page_size;
        u32 reserved[2];
};

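/*
 * Send WQE control segment: the first 16 bytes of every send WQE.
 * opmod_idx_opcode packs the opcode modifier, WQE index and opcode;
 * qpn_ds packs the QPN and the WQE size counted in 16-byte data segments,
 * e.g. ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK
 * (see the MLX5_WQE_CTRL_* masks below).
 */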
struct mlx5_wqe_ctrl_seg {
        __be32 opmod_idx_opcode;
        __be32 qpn_ds;
        u8 signature;
        u8 rsvd[2];
        u8 fm_ce_se;
        __be32 imm;
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f
#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT 8
#define MLX5_WQE_DS_UNITS 16
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8

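/* Checksum offload flags for the cs_flags field of struct mlx5_wqe_eth_seg. */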
enum {
        MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
        MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
        MLX5_ETH_WQE_L3_CSUM = 1 << 6,
        MLX5_ETH_WQE_L4_CSUM = 1 << 7,
};

struct mlx5_wqe_eth_seg {
        u8 rsvd0[4];
        u8 cs_flags;
        u8 rsvd1;
        __be16 mss;
        __be32 rsvd2;
        __be16 inline_hdr_sz;
        u8 inline_hdr_start[2];
};

struct mlx5_wqe_xrc_seg {
        __be32 xrc_srqn;
        u8 rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
        __be64 swap_add;
        __be64 compare;
        __be64 swap_add_mask;
        __be64 compare_mask;
};

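/* Address vector, embedded in the datagram segment for UD and DC transports. */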
struct mlx5_av {
        union {
                struct {
                        __be32 qkey;
                        __be32 reserved;
                } qkey;
                __be64 dc_key;
        } key;
        __be32 dqp_dct;
        u8 stat_rate_sl;
        u8 fl_mlid;
        union {
                __be16 rlid;
                __be16 udp_sport;
        };
        u8 reserved0[4];
        u8 rmac[6];
        u8 tclass;
        u8 hop_limit;
        __be32 grh_gid_fl;
        u8 rgid[16];
};

struct mlx5_wqe_datagram_seg {
        struct mlx5_av av;
};

struct mlx5_wqe_raddr_seg {
        __be64 raddr;
        __be32 rkey;
        u32 reserved;
};

struct mlx5_wqe_atomic_seg {
        __be64 swap_add;
        __be64 compare;
};

struct mlx5_wqe_data_seg {
        __be32 byte_count;
        __be32 lkey;
        __be64 addr;
};

struct mlx5_wqe_umr_ctrl_seg {
        u8 flags;
        u8 rsvd0[3];
        __be16 klm_octowords;
        __be16 bsf_octowords;
        __be64 mkey_mask;
        u8 rsvd1[32];
};

struct mlx5_seg_set_psv {
        __be32 psv_num;
        __be16 syndrome;
        __be16 status;
        __be32 transient_sig;
        __be32 ref_tag;
};

struct mlx5_seg_get_psv {
        u8 rsvd[19];
        u8 num_psv;
        __be32 l_key;
        __be64 va;
        __be32 psv_index[4];
};

struct mlx5_seg_check_psv {
        u8 rsvd0[2];
        __be16 err_coalescing_op;
        u8 rsvd1[2];
        __be16 xport_err_op;
        u8 rsvd2[2];
        __be16 xport_err_mask;
        u8 rsvd3[7];
        u8 num_psv;
        __be32 l_key;
        __be64 va;
        __be32 psv_index[4];
};

struct mlx5_rwqe_sig {
        u8 rsvd0[4];
        u8 signature;
        u8 rsvd1[11];
};

struct mlx5_wqe_signature_seg {
        u8 rsvd0[4];
        u8 signature;
        u8 rsvd1[11];
};

#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff

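/*
 * Inline data segment header; the data length is encoded in the low bits of
 * byte_count (MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK) and the inline data
 * immediately follows this header.
 */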
struct mlx5_wqe_inline_seg {
        __be32 byte_count;
};

enum mlx5_sig_type {
        MLX5_DIF_CRC = 0x1,
        MLX5_DIF_IPCS = 0x2,
};

struct mlx5_bsf_inl {
        __be16 vld_refresh;
        __be16 dif_apptag;
        __be32 dif_reftag;
        u8 sig_type;
        u8 rp_inv_seed;
        u8 rsvd[3];
        u8 dif_inc_ref_guard_check;
        __be16 dif_app_bitmask_check;
};

struct mlx5_bsf {
        struct mlx5_bsf_basic {
                u8 bsf_size_sbs;
                u8 check_byte_mask;
                union {
                        u8 copy_byte_mask;
                        u8 bs_selector;
                        u8 rsvd_wflags;
                } wire;
                union {
                        u8 bs_selector;
                        u8 rsvd_mflags;
                } mem;
                __be32 raw_data_size;
                __be32 w_bfs_psv;
                __be32 m_bfs_psv;
        } basic;
        struct mlx5_bsf_ext {
                __be32 t_init_gen_pro_size;
                __be32 rsvd_epi_size;
                __be32 w_tfs_psv;
                __be32 m_tfs_psv;
        } ext;
        struct mlx5_bsf_inl w_inl;
        struct mlx5_bsf_inl m_inl;
};

struct mlx5_klm {
        __be32 bcount;
        __be32 key;
        __be64 va;
};

struct mlx5_stride_block_entry {
        __be16 stride;
        __be16 bcount;
        __be32 key;
        __be64 va;
};

struct mlx5_stride_block_ctrl_seg {
        __be32 bcount_per_cycle;
        __be32 op;
        __be32 repeat_count;
        u16 rsvd;
        __be16 num_entries;
};

enum mlx5_pagefault_flags {
        MLX5_PFAULT_REQUESTOR = 1 << 0,
        MLX5_PFAULT_WRITE = 1 << 1,
        MLX5_PFAULT_RDMA = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
        u32 bytes_committed;
        u8 event_subtype;
        enum mlx5_pagefault_flags flags;
        union {
                /* Initiator or send message responder pagefault details. */
                struct {
                        /* Received packet size, only valid for responders. */
                        u32 packet_size;
                        /*
                         * WQE index. Refers to either the send queue or
                         * receive queue, according to event_subtype.
                         */
                        u16 wqe_index;
                } wqe;
                /* RDMA responder pagefault details */
                struct {
                        u32 r_key;
                        /*
                         * Received packet size, minimal size page fault
                         * resolution required for forward progress.
                         */
                        u32 packet_size;
                        u32 rdma_op_len;
                        u64 rdma_va;
                } rdma;
        };
};

struct mlx5_core_qp {
        struct mlx5_core_rsc_common common; /* must be first */
        void (*event)(struct mlx5_core_qp *, int);
        void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
        int qpn;
        struct mlx5_rsc_debug *dbg;
        int pid;
};

struct mlx5_qp_path {
        u8 fl;
        u8 rsvd3;
        u8 free_ar;
        u8 pkey_index;
        u8 rsvd0;
        u8 grh_mlid;
        __be16 rlid;
        u8 ackto_lt;
        u8 mgid_index;
        u8 static_rate;
        u8 hop_limit;
        __be32 tclass_flowlabel;
        union {
                u8 rgid[16];
                u8 rip[16];
        };
        u8 f_dscp_ecn_prio;
        u8 ecn_dscp;
        __be16 udp_sport;
        u8 dci_cfi_prio_sl;
        u8 port;
        u8 rmac[6];
};

struct mlx5_qp_context {
        __be32 flags;
        __be32 flags_pd;
        u8 mtu_msgmax;
        u8 rq_size_stride;
        __be16 sq_crq_size;
        __be32 qp_counter_set_usr_page;
        __be32 wire_qpn;
        __be32 log_pg_sz_remote_qpn;
        struct mlx5_qp_path pri_path;
        struct mlx5_qp_path alt_path;
        __be32 params1;
        u8 reserved2[4];
        __be32 next_send_psn;
        __be32 cqn_send;
        __be32 deth_sqpn;
        u8 reserved3[4];
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 xrcd;
        __be32 cqn_recv;
        __be64 db_rec_addr;
        __be32 qkey;
        __be32 rq_type_srqn;
        __be32 rmsn;
        __be16 hw_sq_wqe_counter;
        __be16 sw_sq_wqe_counter;
        __be16 hw_rcyclic_byte_counter;
        __be16 hw_rq_counter;
        __be16 sw_rcyclic_byte_counter;
        __be16 sw_rq_counter;
        u8 rsvd0[5];
        u8 cgs;
        u8 cs_req;
        u8 cs_res;
        __be64 dc_access_key;
        u8 rsvd1[24];
};

struct mlx5_create_qp_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 input_qpn;
        u8 rsvd0[4];
        __be32 opt_param_mask;
        u8 rsvd1[4];
        struct mlx5_qp_context ctx;
        u8 rsvd3[16];
        __be64 pas[0];
};

struct mlx5_create_qp_mbox_out {
        struct mlx5_outbox_hdr hdr;
        __be32 qpn;
        u8 rsvd0[4];
};

struct mlx5_destroy_qp_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 qpn;
        u8 rsvd0[4];
};

struct mlx5_destroy_qp_mbox_out {
        struct mlx5_outbox_hdr hdr;
        u8 rsvd0[8];
};

struct mlx5_modify_qp_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 qpn;
        u8 rsvd1[4];
        __be32 optparam;
        u8 rsvd0[4];
        struct mlx5_qp_context ctx;
};

struct mlx5_modify_qp_mbox_out {
        struct mlx5_outbox_hdr hdr;
        u8 rsvd0[8];
};

struct mlx5_query_qp_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 qpn;
        u8 rsvd[4];
};

struct mlx5_query_qp_mbox_out {
        struct mlx5_outbox_hdr hdr;
        u8 rsvd1[8];
        __be32 optparam;
        u8 rsvd0[4];
        struct mlx5_qp_context ctx;
        u8 rsvd2[16];
        __be64 pas[0];
};

struct mlx5_conf_sqp_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 qpn;
        u8 rsvd[3];
        u8 type;
};

struct mlx5_conf_sqp_mbox_out {
        struct mlx5_outbox_hdr hdr;
        u8 rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_in {
        struct mlx5_inbox_hdr hdr;
        u8 rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_out {
        struct mlx5_outbox_hdr hdr;
        __be32 xrcdn;
        u8 rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 xrcdn;
        u8 rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_out {
        struct mlx5_outbox_hdr hdr;
        u8 rsvd[8];
};

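/*
 * Low-level lookup helpers: resolve a QPN or mkey to its tracking structure
 * via the per-device radix trees. The helpers take no lock themselves, so
 * callers are expected to synchronize against concurrent table updates.
 */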
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
        return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
        return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}

struct mlx5_page_fault_resume_mbox_in {
        struct mlx5_inbox_hdr hdr;
        __be32 flags_qpn;
        u8 reserved[4];
};

struct mlx5_page_fault_resume_mbox_out {
        struct mlx5_outbox_hdr hdr;
        u8 rsvd[8];
};

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 context, int error);
#endif
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq);

static inline const char *mlx5_qp_type_str(int type)
{
        switch (type) {
        case MLX5_QP_ST_RC: return "RC";
        case MLX5_QP_ST_UC: return "UC";
        case MLX5_QP_ST_UD: return "UD";
        case MLX5_QP_ST_XRC: return "XRC";
        case MLX5_QP_ST_MLX: return "MLX";
        case MLX5_QP_ST_QP0: return "QP0";
        case MLX5_QP_ST_QP1: return "QP1";
        case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
        case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
        case MLX5_QP_ST_SNIFFER: return "SNIFFER";
        case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
        case MLX5_QP_ST_PTP_1588: return "PTP_1588";
        case MLX5_QP_ST_REG_UMR: return "REG_UMR";
        default: return "Invalid transport type";
        }
}

static inline const char *mlx5_qp_state_str(int state)
{
        switch (state) {
        case MLX5_QP_STATE_RST:
                return "RST";
        case MLX5_QP_STATE_INIT:
                return "INIT";
        case MLX5_QP_STATE_RTR:
                return "RTR";
        case MLX5_QP_STATE_RTS:
                return "RTS";
        case MLX5_QP_STATE_SQER:
                return "SQER";
        case MLX5_QP_STATE_SQD:
                return "SQD";
        case MLX5_QP_STATE_ERR:
                return "ERR";
        case MLX5_QP_STATE_SQ_DRAINING:
                return "SQ_DRAINING";
        case MLX5_QP_STATE_SUSPENDED:
                return "SUSPENDED";
        default: return "Invalid QP state";
        }
}

#endif /* MLX5_QP_H */