{net,IB}/mlx5: QP/XRCD commands via mlx5 ifc

Remove the old representation of the manually crafted QP/XRCD command
layouts and use the mlx5_ifc canonical structures and defines instead.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
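
For readers unfamiliar with the mlx5_ifc accessors, the sketch below
(illustrative only, not part of the patch; the function and parameter
names are invented for the example) shows the idiom this conversion
moves to: command buffers become plain u32 arrays sized with
MLX5_ST_SZ_BYTES()/MLX5_FLD_SZ_BYTES(), sub-structures are located by
name with MLX5_ADDR_OF(), and fields are written with
MLX5_SET()/MLX5_SET64(), which perform the big-endian conversion and
bit placement that the removed code open-coded with cpu_to_be32() and
shifts.

	/*
	 * Illustrative sketch of the mlx5_ifc idiom used throughout this
	 * patch. example_build_create_qp() and its parameters are made up;
	 * the accessor macros are the real ones from
	 * include/linux/mlx5/device.h and include/linux/mlx5/mlx5_ifc.h.
	 */
	static int example_build_create_qp(int ncont, u32 uar_index, u64 dbr_dma)
	{
		int inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
			    MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
		void *qpc;
		u32 *in;

		in = mlx5_vzalloc(inlen);	/* zeroed, so reserved fields stay 0 */
		if (!in)
			return -ENOMEM;

		/* locate the qpc sub-structure inside create_qp_in by name */
		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

		/* field writes replace the manual cpu_to_be32()/shift arithmetic */
		MLX5_SET(qpc, qpc, uar_page, uar_index);
		MLX5_SET64(qpc, qpc, dbr_addr, dbr_dma);

		/* a real caller would post the command here, then free */
		kvfree(in);
		return 0;
	}
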
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 21ab0e2..d22492f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -726,7 +726,7 @@
 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
 			  struct ib_qp_init_attr *attr,
-			  struct mlx5_create_qp_mbox_in **in,
+			  u32 **in,
 			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
 			  struct mlx5_ib_qp_base *base)
 {
@@ -739,6 +739,8 @@
 	u32 offset = 0;
 	int uuarn;
 	int ncont = 0;
+	__be64 *pas;
+	void *qpc;
 	int err;
 
 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
@@ -795,20 +797,24 @@
 		ubuffer->umem = NULL;
 	}
 
-	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
+	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
 	*in = mlx5_vzalloc(*inlen);
 	if (!*in) {
 		err = -ENOMEM;
 		goto err_umem;
 	}
-	if (ubuffer->umem)
-		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift,
-				     (*in)->pas, 0);
-	(*in)->ctx.log_pg_sz_remote_qpn =
-		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
-	(*in)->ctx.params2 = cpu_to_be32(offset << 6);
 
-	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
+	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
+	if (ubuffer->umem)
+		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
+
+	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+
+	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET(qpc, qpc, page_offset, offset);
+
+	MLX5_SET(qpc, qpc, uar_page, uar_index);
 	resp->uuar_index = uuarn;
 	qp->uuarn = uuarn;
 
@@ -857,12 +863,13 @@
 static int create_kernel_qp(struct mlx5_ib_dev *dev,
 			    struct ib_qp_init_attr *init_attr,
 			    struct mlx5_ib_qp *qp,
-			    struct mlx5_create_qp_mbox_in **in, int *inlen,
+			    u32 **in, int *inlen,
 			    struct mlx5_ib_qp_base *base)
 {
 	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
 	struct mlx5_uuar_info *uuari;
 	int uar_index;
+	void *qpc;
 	int uuarn;
 	int err;
 
@@ -902,25 +909,29 @@
 	}
 
 	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
-	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
+	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
 	*in = mlx5_vzalloc(*inlen);
 	if (!*in) {
 		err = -ENOMEM;
 		goto err_buf;
 	}
-	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
-	(*in)->ctx.log_pg_sz_remote_qpn =
-		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
+
+	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+	MLX5_SET(qpc, qpc, uar_page, uar_index);
+	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
 	/* Set "fast registration enabled" for all kernel QPs */
-	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
-	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+	MLX5_SET(qpc, qpc, fre, 1);
+	MLX5_SET(qpc, qpc, rlky, 1);
 
 	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
-		(*in)->ctx.deth_sqpn = cpu_to_be32(1);
+		MLX5_SET(qpc, qpc, deth_sqpn, 1);
 		qp->flags |= MLX5_IB_QP_SQPN_QP1;
 	}
 
-	mlx5_fill_page_array(&qp->buf, (*in)->pas);
+	mlx5_fill_page_array(&qp->buf,
+			     (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));
 
 	err = mlx5_db_alloc(dev->mdev, &qp->db);
 	if (err) {
@@ -974,15 +985,15 @@
 	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
 }
 
-static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
 {
 	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
 	    (attr->qp_type == IB_QPT_XRC_INI))
-		return cpu_to_be32(MLX5_SRQ_RQ);
+		return MLX5_SRQ_RQ;
 	else if (!qp->has_rq)
-		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
+		return MLX5_ZERO_LEN_RQ;
 	else
-		return cpu_to_be32(MLX5_NON_ZERO_RQ);
+		return MLX5_NON_ZERO_RQ;
 }
 
 static int is_connected(enum ib_qp_type qp_type)
@@ -1191,7 +1202,7 @@
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				struct mlx5_create_qp_mbox_in *in,
+				u32 *in,
 				struct ib_pd *pd)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1461,18 +1472,18 @@
 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
 {
 	struct mlx5_ib_resources *devr = &dev->devr;
+	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_ib_qp_base *base;
 	struct mlx5_ib_create_qp_resp resp;
-	struct mlx5_create_qp_mbox_in *in;
-	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_cq *send_cq;
 	struct mlx5_ib_cq *recv_cq;
 	unsigned long flags;
-	int inlen = sizeof(*in);
-	int err;
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
+	struct mlx5_ib_create_qp ucmd;
+	struct mlx5_ib_qp_base *base;
 	void *qpc;
+	u32 *in;
+	int err;
 
 	base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
 	       &qp->raw_packet_qp.rq.base :
@@ -1600,7 +1611,7 @@
 		if (err)
 			return err;
 	} else {
-		in = mlx5_vzalloc(sizeof(*in));
+		in = mlx5_vzalloc(inlen);
 		if (!in)
 			return -ENOMEM;
 
@@ -1610,26 +1621,29 @@
 	if (is_sqp(init_attr->qp_type))
 		qp->port = init_attr->port_num;
 
-	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
-				    MLX5_QP_PM_MIGRATED << 11);
+	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
-		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
+		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
 	else
-		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
+		MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
 
 	if (qp->wq_sig)
-		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
+		MLX5_SET(qpc, qpc, wq_signature, 1);
 
 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
-		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
+		MLX5_SET(qpc, qpc, block_lb_mc, 1);
 
 	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
-		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
+		MLX5_SET(qpc, qpc, cd_master, 1);
 	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
-		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
+		MLX5_SET(qpc, qpc, cd_slave_send, 1);
 	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
-		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
+		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
 		int rcqe_sz;
@@ -1639,71 +1653,68 @@
 		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
 
 		if (rcqe_sz == 128)
-			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
+			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
 		else
-			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
+			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
 
 		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
 			if (scqe_sz == 128)
-				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
+				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
 			else
-				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
+				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
 		}
 	}
 
 	if (qp->rq.wqe_cnt) {
-		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
-		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
+		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
 	}
 
-	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
+	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
 
 	if (qp->sq.wqe_cnt)
-		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
+		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
 	else
-		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
+		MLX5_SET(qpc, qpc, no_sq, 1);
 
 	/* Set default resources */
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
-		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
-		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
-		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
-		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
 		break;
 	case IB_QPT_XRC_INI:
-		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
-		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
-		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
 		break;
 	default:
 		if (init_attr->srq) {
-			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
-			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
+			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
 		} else {
-			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
-			in->ctx.rq_type_srqn |=
-				cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
+			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
 		}
 	}
 
 	if (init_attr->send_cq)
-		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
+		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
 
 	if (init_attr->recv_cq)
-		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
 
-	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
+	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
 
-	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
-		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
-		/* 0xffffff means we ask to work with cqe version 0 */
+	/* 0xffffff means we ask to work with cqe version 0 */
+	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
 		MLX5_SET(qpc, qpc, user_index, uidx);
-	}
+
 	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
 	if (init_attr->qp_type == IB_QPT_UD &&
 	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
-		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
 		qp->flags |= MLX5_IB_QP_LSO;
 	}
@@ -4320,21 +4331,24 @@
 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			 struct ib_qp_attr *qp_attr)
 {
-	struct mlx5_query_qp_mbox_out *outb;
+	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
 	struct mlx5_qp_context *context;
 	int mlx5_state;
+	u32 *outb;
 	int err = 0;
 
-	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+	outb = kzalloc(outlen, GFP_KERNEL);
 	if (!outb)
 		return -ENOMEM;
 
-	context = &outb->ctx;
 	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
-				 sizeof(*outb));
+				 outlen);
 	if (err)
 		goto out;
 
+	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
 	mlx5_state = be32_to_cpu(context->flags) >> 28;
 
 	qp->state		     = to_ib_qp_state(mlx5_state);