/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2

#define MLX5_QPN_BITS		24
#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)
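/* QP numbers are MLX5_QPN_BITS (24 bits) wide, so MLX5_QPN_MASK evaluates to
 * 0x00ffffff; fields that pack a QPN together with flags are typically
 * reduced with something like "qpn = be32_to_cpu(field) & MLX5_QPN_MASK".
 */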

enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
	MLX5_QP_OPTPAR_RRE = 1 << 1,
	MLX5_QP_OPTPAR_RAE = 1 << 2,
	MLX5_QP_OPTPAR_RWE = 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
	MLX5_QP_OPTPAR_SRQN = 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
	MLX5_QP_OPTPAR_DC_HS = 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
};

enum mlx5_qp_state {
	MLX5_QP_STATE_RST = 0,
	MLX5_QP_STATE_INIT = 1,
	MLX5_QP_STATE_RTR = 2,
	MLX5_QP_STATE_RTS = 3,
	MLX5_QP_STATE_SQER = 4,
	MLX5_QP_STATE_SQD = 5,
	MLX5_QP_STATE_ERR = 6,
	MLX5_QP_STATE_SQ_DRAINING = 7,
	MLX5_QP_STATE_SUSPENDED = 9,
	MLX5_QP_NUM_STATE,
	MLX5_QP_STATE,
	MLX5_QP_STATE_BAD,
};

enum {
	MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
	MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
	MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
	MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
};

enum {
	MLX5_QP_ST_RC = 0x0,
	MLX5_QP_ST_UC = 0x1,
	MLX5_QP_ST_UD = 0x2,
	MLX5_QP_ST_XRC = 0x3,
	MLX5_QP_ST_MLX = 0x4,
	MLX5_QP_ST_DCI = 0x5,
	MLX5_QP_ST_DCT = 0x6,
	MLX5_QP_ST_QP0 = 0x7,
	MLX5_QP_ST_QP1 = 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
	MLX5_QP_ST_RAW_IPV6 = 0xa,
	MLX5_QP_ST_SNIFFER = 0xb,
	MLX5_QP_ST_SYNC_UMR = 0xe,
	MLX5_QP_ST_PTP_1588 = 0xd,
	MLX5_QP_ST_REG_UMR = 0xc,
	MLX5_QP_ST_MAX
};

enum {
	MLX5_QP_PM_MIGRATED = 0x3,
	MLX5_QP_PM_ARMED = 0x0,
	MLX5_QP_PM_REARM = 0x1
};

enum {
	MLX5_NON_ZERO_RQ = 0 << 24,
	MLX5_SRQ_RQ = 1 << 24,
	MLX5_CRQ_RQ = 2 << 24,
	MLX5_ZERO_LEN_RQ = 3 << 24
};

enum {
	/* params1 */
	MLX5_QP_BIT_SRE = 1 << 15,
	MLX5_QP_BIT_SWE = 1 << 14,
	MLX5_QP_BIT_SAE = 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE = 1 << 15,
	MLX5_QP_BIT_RWE = 1 << 14,
	MLX5_QP_BIT_RAE = 1 << 13,
	MLX5_QP_BIT_RIC = 1 << 4,
	MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2,
	MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1,
	MLX5_QP_BIT_CC_MASTER = 1 << 0
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
	MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};

enum {
	MLX5_SEND_WQE_DS = 16,
	MLX5_SEND_WQE_BB = 64,
};

#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)

enum {
	MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
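/* Send WQEs are built from 16-byte data segments (MLX5_SEND_WQE_DS) packed
 * into 64-byte basic blocks (MLX5_SEND_WQE_BB), so MLX5_SEND_WQEBB_NUM_DS is
 * 64 / 16 = 4 and the largest send WQE is MLX5_SEND_WQE_MAX_WQEBBS * 64 =
 * 1024 bytes.
 */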

enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
};

enum {
	MLX5_FENCE_MODE_NONE = 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
	MLX5_FENCE_MODE_FENCE = 2 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
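/* The MLX5_WQE_CTRL_* completion/solicited flags and the MLX5_FENCE_MODE_*
 * values occupy disjoint bit ranges of the fm_ce_se byte in
 * struct mlx5_wqe_ctrl_seg below (fence mode in bits 7:5, completion event
 * in bits 3:2, solicited event in bit 1).  A minimal, illustrative sketch of
 * how a sender might compose the byte ("signaled"/"solicited" are
 * hypothetical caller flags):
 *
 *	u8 fm_ce_se = MLX5_FENCE_MODE_NONE;
 *
 *	if (signaled)
 *		fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
 *	if (solicited)
 *		fm_ce_se |= MLX5_WQE_CTRL_SOLICITED;
 *	ctrl->fm_ce_se = fm_ce_se;
 */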

enum {
	MLX5_QP_LAT_SENSITIVE = 1 << 28,
	MLX5_QP_BLOCK_MCAST = 1 << 30,
	MLX5_QP_ENABLE_SIG = 1 << 31,
};

enum {
	MLX5_RCV_DBR = 0,
	MLX5_SND_DBR = 1,
};

enum {
	MLX5_FLAGS_INLINE = 1 << 7,
	MLX5_FLAGS_CHECK_FREE = 1 << 5,
};

struct mlx5_wqe_fmr_seg {
	__be32 flags;
	__be32 mem_key;
	__be64 buf_list;
	__be64 start_addr;
	__be64 reg_len;
	__be32 offset;
	__be32 page_size;
	u32 reserved[2];
};

struct mlx5_wqe_ctrl_seg {
	__be32 opmod_idx_opcode;
	__be32 qpn_ds;
	u8 signature;
	u8 rsvd[2];
	u8 fm_ce_se;
	__be32 imm;
};

#define MLX5_WQE_CTRL_DS_MASK		0x3f
#define MLX5_WQE_CTRL_QPN_MASK		0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT		8
#define MLX5_WQE_DS_UNITS		16
#define MLX5_WQE_CTRL_OPCODE_MASK	0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK	0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT	8
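/* As implied by the masks above, opmod_idx_opcode carries the opcode in its
 * low byte and the WQE index in bits 23:8, while qpn_ds carries the WQE size
 * in 16-byte units (MLX5_WQE_DS_UNITS) in its low six bits and the QPN in
 * bits 31:8.  An illustrative decode of a control segment:
 *
 *	u32 w0 = be32_to_cpu(ctrl->opmod_idx_opcode);
 *	u32 w1 = be32_to_cpu(ctrl->qpn_ds);
 *	u8 opcode = w0 & MLX5_WQE_CTRL_OPCODE_MASK;
 *	u16 idx = (w0 & MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
 *		  MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
 *	int ds = w1 & MLX5_WQE_CTRL_DS_MASK;
 *	u32 qpn = (w1 & MLX5_WQE_CTRL_QPN_MASK) >> MLX5_WQE_CTRL_QPN_SHIFT;
 */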

enum {
	MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
	MLX5_ETH_WQE_L3_CSUM = 1 << 6,
	MLX5_ETH_WQE_L4_CSUM = 1 << 7,
};

struct mlx5_wqe_eth_seg {
	u8 rsvd0[4];
	u8 cs_flags;
	u8 rsvd1;
	__be16 mss;
	__be32 rsvd2;
	__be16 inline_hdr_sz;
	u8 inline_hdr_start[2];
};
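/* The MLX5_ETH_WQE_*_CSUM flags above are the values a transmit path places
 * in cs_flags to request checksum offload, e.g. MLX5_ETH_WQE_L3_CSUM |
 * MLX5_ETH_WQE_L4_CSUM for an outer-checksummed packet; inline_hdr_sz and
 * inline_hdr_start describe the packet headers copied inline into the WQE.
 */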

struct mlx5_wqe_xrc_seg {
	__be32 xrc_srqn;
	u8 rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
	__be64 swap_add;
	__be64 compare;
	__be64 swap_add_mask;
	__be64 compare_mask;
};

struct mlx5_av {
	union {
		struct {
			__be32 qkey;
			__be32 reserved;
		} qkey;
		__be64 dc_key;
	} key;
	__be32 dqp_dct;
	u8 stat_rate_sl;
	u8 fl_mlid;
	union {
		__be16 rlid;
		__be16 udp_sport;
	};
	u8 reserved0[4];
	u8 rmac[6];
	u8 tclass;
	u8 hop_limit;
	__be32 grh_gid_fl;
	u8 rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_av av;
};

struct mlx5_wqe_raddr_seg {
	__be64 raddr;
	__be32 rkey;
	u32 reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64 swap_add;
	__be64 compare;
};

struct mlx5_wqe_data_seg {
	__be32 byte_count;
	__be32 lkey;
	__be64 addr;
};
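/* struct mlx5_wqe_data_seg is the basic scatter/gather element: a 4-byte
 * byte count, a 4-byte lkey and an 8-byte address, i.e. exactly one 16-byte
 * MLX5_SEND_WQE_DS unit.
 */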

struct mlx5_wqe_umr_ctrl_seg {
	u8 flags;
	u8 rsvd0[3];
	__be16 klm_octowords;
	__be16 bsf_octowords;
	__be64 mkey_mask;
	u8 rsvd1[32];
};

struct mlx5_seg_set_psv {
	__be32 psv_num;
	__be16 syndrome;
	__be16 status;
	__be32 transient_sig;
	__be32 ref_tag;
};

struct mlx5_seg_get_psv {
	u8 rsvd[19];
	u8 num_psv;
	__be32 l_key;
	__be64 va;
	__be32 psv_index[4];
};

struct mlx5_seg_check_psv {
	u8 rsvd0[2];
	__be16 err_coalescing_op;
	u8 rsvd1[2];
	__be16 xport_err_op;
	u8 rsvd2[2];
	__be16 xport_err_mask;
	u8 rsvd3[7];
	u8 num_psv;
	__be32 l_key;
	__be64 va;
	__be32 psv_index[4];
};

struct mlx5_rwqe_sig {
	u8 rsvd0[4];
	u8 signature;
	u8 rsvd1[11];
};

struct mlx5_wqe_signature_seg {
	u8 rsvd0[4];
	u8 signature;
	u8 rsvd1[11];
};

#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK	0x3ff

struct mlx5_wqe_inline_seg {
	__be32 byte_count;
};
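/* For inline data the byte_count dword carries both an inline indication and
 * the data length; the length itself is expected to be recovered as
 * "be32_to_cpu(seg->byte_count) & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK".
 */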

enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

struct mlx5_bsf_inl {
	__be16 vld_refresh;
	__be16 dif_apptag;
	__be32 dif_reftag;
	u8 sig_type;
	u8 rp_inv_seed;
	u8 rsvd[3];
	u8 dif_inc_ref_guard_check;
	__be16 dif_app_bitmask_check;
};

struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8 bsf_size_sbs;
		u8 check_byte_mask;
		union {
			u8 copy_byte_mask;
			u8 bs_selector;
			u8 rsvd_wflags;
		} wire;
		union {
			u8 bs_selector;
			u8 rsvd_mflags;
		} mem;
		__be32 raw_data_size;
		__be32 w_bfs_psv;
		__be32 m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32 t_init_gen_pro_size;
		__be32 rsvd_epi_size;
		__be32 w_tfs_psv;
		__be32 m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl w_inl;
	struct mlx5_bsf_inl m_inl;
};

struct mlx5_klm {
	__be32 bcount;
	__be32 key;
	__be64 va;
};

struct mlx5_stride_block_entry {
	__be16 stride;
	__be16 bcount;
	__be32 key;
	__be64 va;
};

struct mlx5_stride_block_ctrl_seg {
	__be32 bcount_per_cycle;
	__be32 op;
	__be32 repeat_count;
	u16 rsvd;
	__be16 num_entries;
};

enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE = 1 << 1,
	MLX5_PFAULT_RDMA = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32 bytes_committed;
	u8 event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32 packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16 wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32 r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32 packet_size;
			u32 rdma_op_len;
			u64 rdma_va;
		} rdma;
	};
};
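/* The union above is interpreted according to the fault type reported in
 * event_subtype/flags: RDMA responder faults carry the rdma member, WQE
 * (send/receive) faults carry the wqe member.  An illustrative, hypothetical
 * pfault_handler skeleton (resolve_rdma_fault()/resolve_wqe_fault() are
 * placeholders for driver-specific page-fault resolution):
 *
 *	static void my_pfault_handler(struct mlx5_core_qp *qp,
 *				      struct mlx5_pagefault *pfault)
 *	{
 *		if (pfault->flags & MLX5_PFAULT_RDMA)
 *			resolve_rdma_fault(qp, pfault->rdma.rdma_va,
 *					   pfault->rdma.rdma_op_len);
 *		else
 *			resolve_wqe_fault(qp, pfault->wqe.wqe_index);
 *	}
 */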

struct mlx5_core_qp {
	struct mlx5_core_rsc_common common; /* must be first */
	void (*event)(struct mlx5_core_qp *, int);
	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
	int qpn;
	struct mlx5_rsc_debug *dbg;
	int pid;
};

struct mlx5_qp_path {
	u8 fl_free_ar;
	u8 rsvd3;
	__be16 pkey_index;
	u8 rsvd0;
	u8 grh_mlid;
	__be16 rlid;
	u8 ackto_lt;
	u8 mgid_index;
	u8 static_rate;
	u8 hop_limit;
	__be32 tclass_flowlabel;
	union {
		u8 rgid[16];
		u8 rip[16];
	};
	u8 f_dscp_ecn_prio;
	u8 ecn_dscp;
	__be16 udp_sport;
	u8 dci_cfi_prio_sl;
	u8 port;
	u8 rmac[6];
};

struct mlx5_qp_context {
	__be32 flags;
	__be32 flags_pd;
	u8 mtu_msgmax;
	u8 rq_size_stride;
	__be16 sq_crq_size;
	__be32 qp_counter_set_usr_page;
	__be32 wire_qpn;
	__be32 log_pg_sz_remote_qpn;
	struct mlx5_qp_path pri_path;
	struct mlx5_qp_path alt_path;
	__be32 params1;
	u8 reserved2[4];
	__be32 next_send_psn;
	__be32 cqn_send;
	__be32 deth_sqpn;
	u8 reserved3[4];
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 xrcd;
	__be32 cqn_recv;
	__be64 db_rec_addr;
	__be32 qkey;
	__be32 rq_type_srqn;
	__be32 rmsn;
	__be16 hw_sq_wqe_counter;
	__be16 sw_sq_wqe_counter;
	__be16 hw_rcyclic_byte_counter;
	__be16 hw_rq_counter;
	__be16 sw_rcyclic_byte_counter;
	__be16 sw_rq_counter;
	u8 rsvd0[5];
	u8 cgs;
	u8 cs_req;
	u8 cs_res;
	__be64 dc_access_key;
	u8 rsvd1[24];
};

struct mlx5_create_qp_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 input_qpn;
	u8 rsvd0[4];
	__be32 opt_param_mask;
	u8 rsvd1[4];
	struct mlx5_qp_context ctx;
	u8 rsvd3[16];
	__be64 pas[0];
};

struct mlx5_create_qp_mbox_out {
	struct mlx5_outbox_hdr hdr;
	__be32 qpn;
	u8 rsvd0[4];
};

struct mlx5_destroy_qp_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 qpn;
	u8 rsvd0[4];
};

struct mlx5_destroy_qp_mbox_out {
	struct mlx5_outbox_hdr hdr;
	u8 rsvd0[8];
};

struct mlx5_modify_qp_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 qpn;
	u8 rsvd0[4];
	__be32 optparam;
	u8 rsvd1[4];
	struct mlx5_qp_context ctx;
	u8 rsvd2[16];
};

struct mlx5_modify_qp_mbox_out {
	struct mlx5_outbox_hdr hdr;
	u8 rsvd0[8];
};

struct mlx5_query_qp_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 qpn;
	u8 rsvd[4];
};

struct mlx5_query_qp_mbox_out {
	struct mlx5_outbox_hdr hdr;
	u8 rsvd1[8];
	__be32 optparam;
	u8 rsvd0[4];
	struct mlx5_qp_context ctx;
	u8 rsvd2[16];
	__be64 pas[0];
};

struct mlx5_conf_sqp_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 qpn;
	u8 rsvd[3];
	u8 type;
};

struct mlx5_conf_sqp_mbox_out {
	struct mlx5_outbox_hdr hdr;
	u8 rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr hdr;
	u8 rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr hdr;
	__be32 xrcdn;
	u8 rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 xrcdn;
	u8 rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr hdr;
	u8 rsvd[8];
};

static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
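/* Both helpers walk a radix tree in dev->priv without taking any lock, so
 * the caller is responsible for serializing against table updates (typically
 * by holding the corresponding table lock).  Illustrative use:
 *
 *	struct mlx5_core_qp *qp = __mlx5_qp_lookup(dev, qpn & MLX5_QPN_MASK);
 *
 *	if (!qp)
 *		return -EINVAL;
 */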

struct mlx5_page_fault_resume_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 flags_qpn;
	u8 reserved[4];
};

struct mlx5_page_fault_resume_mbox_out {
	struct mlx5_outbox_hdr hdr;
	u8 rsvd[8];
};

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 context, int error);
#endif
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq);
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size);
int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
				  u32 *out_of_buffer);
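/* A minimal sketch of the create/destroy flow built from the prototypes
 * above (error handling trimmed; "npas", the context setup and the embedding
 * of "qp" in a caller structure are placeholders):
 *
 *	struct mlx5_create_qp_mbox_in *in;
 *	struct mlx5_core_qp qp = {};
 *	int inlen = sizeof(*in) + npas * sizeof(__be64);
 *	int err;
 *
 *	in = kzalloc(inlen, GFP_KERNEL);
 *	if (!in)
 *		return -ENOMEM;
 *	// fill in->ctx and in->pas[] here
 *	err = mlx5_core_create_qp(dev, &qp, in, inlen);
 *	kfree(in);
 *	...
 *	err = mlx5_core_destroy_qp(dev, &qp);
 */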

static inline const char *mlx5_qp_type_str(int type)
{
	switch (type) {
	case MLX5_QP_ST_RC: return "RC";
	case MLX5_QP_ST_UC: return "UC";
	case MLX5_QP_ST_UD: return "UD";
	case MLX5_QP_ST_XRC: return "XRC";
	case MLX5_QP_ST_MLX: return "MLX";
	case MLX5_QP_ST_QP0: return "QP0";
	case MLX5_QP_ST_QP1: return "QP1";
	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
	default: return "Invalid transport type";
	}
}

static inline const char *mlx5_qp_state_str(int state)
{
	switch (state) {
	case MLX5_QP_STATE_RST:
		return "RST";
	case MLX5_QP_STATE_INIT:
		return "INIT";
	case MLX5_QP_STATE_RTR:
		return "RTR";
	case MLX5_QP_STATE_RTS:
		return "RTS";
	case MLX5_QP_STATE_SQER:
		return "SQER";
	case MLX5_QP_STATE_SQD:
		return "SQD";
	case MLX5_QP_STATE_ERR:
		return "ERR";
	case MLX5_QP_STATE_SQ_DRAINING:
		return "SQ_DRAINING";
	case MLX5_QP_STATE_SUSPENDED:
		return "SUSPENDED";
	default: return "Invalid QP state";
	}
}

#endif /* MLX5_QP_H */