mlx5: Move pci device handling from mlx5_ib to mlx5_core

In preparation for a new mlx5 device that is VPI capable (i.e., each
port can be either IB or Ethernet), move the PCI device functionality
from mlx5_ib to mlx5_core.

This involves the following changes:
1. Move the mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev
   is now an independent structure maintained by mlx5_core, and
   mlx5_ib_dev holds a pointer to it. This requires changing every
   place where the core device struct was accessed through
   mlx5_ib_dev into a pointer dereference.
2. All PCI initialization is now done in mlx5_core. Thus, it is
   now mlx5_core which calls pci_register_driver() (and not
   mlx5_ib, as was previously the case).
3. mlx5_ib now registers itself with mlx5_core as an "interface"
   driver, very much like the mechanism employed for the mlx4
   (ConnectX) driver. Once mlx5_core has initialized the HCA, it
   invokes the registered interface drivers to perform their own
   initialization (a rough sketch of this flow follows the list).
4. The core now registers a new event handler, mlx5_core_event(),
   which dispatches events to the event handlers registered by the
   interfaces.

Based on a patch by Eli Cohen <eli@mellanox.com>

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 364d4b6..f2cfd36 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -54,96 +54,17 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
-static int prof_sel = 2;
-module_param_named(prof_sel, prof_sel, int, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static int deprecated_prof_sel = 2;
+module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
 
 static char mlx5_version[] =
 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
 	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
-static struct mlx5_profile profile[] = {
-	[0] = {
-		.mask		= 0,
-	},
-	[1] = {
-		.mask		= MLX5_PROF_MASK_QP_SIZE,
-		.log_max_qp	= 12,
-	},
-	[2] = {
-		.mask		= MLX5_PROF_MASK_QP_SIZE |
-				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
-		.mr_cache[0]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[1]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[2]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[3]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[4]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[5]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[6]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[7]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[8]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[9]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[10]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[11]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[12]	= {
-			.size	= 64,
-			.limit	= 32
-		},
-		.mr_cache[13]	= {
-			.size	= 32,
-			.limit	= 16
-		},
-		.mr_cache[14]	= {
-			.size	= 16,
-			.limit	= 8
-		},
-		.mr_cache[15]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-	},
-};
-
 int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 {
-	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
 	int err = -ENOENT;
 
@@ -163,7 +84,7 @@
 
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
 	char name[MLX5_MAX_EQ_NAME];
 	struct mlx5_eq *eq, *n;
 	int ncomp_vec;
@@ -182,9 +103,9 @@
 		}
 
 		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
-		err = mlx5_create_map_eq(&dev->mdev, eq,
+		err = mlx5_create_map_eq(dev->mdev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 name, &dev->mdev.priv.uuari.uars[0]);
+					 name, &dev->mdev->priv.uuari.uars[0]);
 		if (err) {
 			kfree(eq);
 			goto clean;
@@ -204,7 +125,7 @@
 	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
 		list_del(&eq->list);
 		spin_unlock(&table->lock);
-		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+		if (mlx5_destroy_unmap_eq(dev->mdev, eq))
 			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
 		kfree(eq);
 		spin_lock(&table->lock);
@@ -215,14 +136,14 @@
 
 static void free_comp_eqs(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
 
 	spin_lock(&table->lock);
 	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
 		list_del(&eq->list);
 		spin_unlock(&table->lock);
-		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+		if (mlx5_destroy_unmap_eq(dev->mdev, eq))
 			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
 		kfree(eq);
 		spin_lock(&table->lock);
@@ -255,14 +176,14 @@
 
 	memset(props, 0, sizeof(*props));
 
-	props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
-		(fw_rev_min(&dev->mdev) << 16) |
-		fw_rev_sub(&dev->mdev);
+	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+		(fw_rev_min(dev->mdev) << 16) |
+		fw_rev_sub(dev->mdev);
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
 		IB_DEVICE_RC_RNR_NAK_GEN;
-	flags = dev->mdev.caps.flags;
+	flags = dev->mdev->caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -292,30 +213,30 @@
 	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
 
 	props->max_mr_size	   = ~0ull;
-	props->page_size_cap	   = dev->mdev.caps.min_page_sz;
-	props->max_qp		   = 1 << dev->mdev.caps.log_max_qp;
-	props->max_qp_wr	   = dev->mdev.caps.max_wqes;
-	max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+	props->page_size_cap	   = dev->mdev->caps.min_page_sz;
+	props->max_qp		   = 1 << dev->mdev->caps.log_max_qp;
+	props->max_qp_wr	   = dev->mdev->caps.max_wqes;
+	max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+	max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_cq		   = 1 << dev->mdev.caps.log_max_cq;
-	props->max_cqe		   = dev->mdev.caps.max_cqes - 1;
-	props->max_mr		   = 1 << dev->mdev.caps.log_max_mkey;
-	props->max_pd		   = 1 << dev->mdev.caps.log_max_pd;
-	props->max_qp_rd_atom	   = dev->mdev.caps.max_ra_req_qp;
-	props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
+	props->max_cq		   = 1 << dev->mdev->caps.log_max_cq;
+	props->max_cqe		   = dev->mdev->caps.max_cqes - 1;
+	props->max_mr		   = 1 << dev->mdev->caps.log_max_mkey;
+	props->max_pd		   = 1 << dev->mdev->caps.log_max_pd;
+	props->max_qp_rd_atom	   = dev->mdev->caps.max_ra_req_qp;
+	props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
-	props->max_srq		   = 1 << dev->mdev.caps.log_max_srq;
-	props->max_srq_wr	   = dev->mdev.caps.max_srq_wqes - 1;
+	props->max_srq		   = 1 << dev->mdev->caps.log_max_srq;
+	props->max_srq_wr	   = dev->mdev->caps.max_srq_wqes - 1;
 	props->max_srq_sge	   = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
-	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
+	props->local_ca_ack_delay  = dev->mdev->caps.local_ca_ack_delay;
 	props->atomic_cap	   = IB_ATOMIC_NONE;
 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
 	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
-	props->max_mcast_grp	   = 1 << dev->mdev.caps.log_max_mcg;
-	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
+	props->max_mcast_grp	   = 1 << dev->mdev->caps.log_max_mcg;
+	props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -336,7 +257,7 @@
 	int ext_active_speed;
 	int err = -ENOMEM;
 
-	if (port < 1 || port > dev->mdev.caps.num_ports) {
+	if (port < 1 || port > dev->mdev->caps.num_ports) {
 		mlx5_ib_warn(dev, "invalid port number %d\n", port);
 		return -EINVAL;
 	}
@@ -367,8 +288,8 @@
 	props->phys_state	= out_mad->data[33] >> 4;
 	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
 	props->gid_tbl_len	= out_mad->data[50];
-	props->max_msg_sz	= 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
-	props->pkey_tbl_len	= to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
+	props->max_msg_sz	= 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
+	props->pkey_tbl_len	= to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
 	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
 	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
 	props->active_width	= out_mad->data[31] & 0xf;
@@ -395,7 +316,7 @@
 
 	/* If reported active speed is QDR, check if is FDR-10 */
 	if (props->active_speed == 4) {
-		if (dev->mdev.caps.ext_port_cap[port - 1] &
+		if (dev->mdev->caps.ext_port_cap[port - 1] &
 		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
 			init_query_mad(in_mad);
 			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -508,7 +429,7 @@
 	 * a 144 trap.  If cmd fails, just ignore.
 	 */
 	memcpy(&in, props->node_desc, 64);
-	err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
+	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
 	if (err)
 		return err;
@@ -535,7 +456,7 @@
 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
 		~props->clr_port_cap_mask;
 
-	err = mlx5_set_port_caps(&dev->mdev, port, tmp);
+	err = mlx5_set_port_caps(dev->mdev, port, tmp);
 
 out:
 	mutex_unlock(&dev->cap_mask_mutex);
@@ -591,14 +512,14 @@
 
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-	resp.qp_tab_size      = 1 << dev->mdev.caps.log_max_qp;
-	resp.bf_reg_size      = dev->mdev.caps.bf_reg_size;
+	resp.qp_tab_size      = 1 << dev->mdev->caps.log_max_qp;
+	resp.bf_reg_size      = dev->mdev->caps.bf_reg_size;
 	resp.cache_line_size  = L1_CACHE_BYTES;
-	resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
-	resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
-	resp.max_send_wqebb = dev->mdev.caps.max_wqes;
-	resp.max_recv_wr = dev->mdev.caps.max_wqes;
-	resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;
+	resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
+	resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
+	resp.max_send_wqebb = dev->mdev->caps.max_wqes;
+	resp.max_recv_wr = dev->mdev->caps.max_wqes;
+	resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -635,7 +556,7 @@
 	}
 
 	for (i = 0; i < num_uars; i++) {
-		err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
+		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
 		if (err)
 			goto out_count;
 	}
@@ -644,7 +565,7 @@
 	mutex_init(&context->db_page_mutex);
 
 	resp.tot_uuars = req.total_num_uuars;
-	resp.num_ports = dev->mdev.caps.num_ports;
+	resp.num_ports = dev->mdev->caps.num_ports;
 	err = ib_copy_to_udata(udata, &resp,
 			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
@@ -658,7 +579,7 @@
 
 out_uars:
 	for (i--; i >= 0; i--)
-		mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
+		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
 out_count:
 	kfree(uuari->count);
 
@@ -681,7 +602,7 @@
 	int i;
 
 	for (i = 0; i < uuari->num_uars; i++) {
-		if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
+		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
 			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
 	}
 
@@ -695,7 +616,7 @@
 
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
 {
-	return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
+	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
 }
 
 static int get_command(unsigned long offset)
@@ -773,7 +694,7 @@
 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	seg->start_addr = 0;
 
-	err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+	err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
 				    NULL, NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
@@ -798,7 +719,7 @@
 
 	memset(&mr, 0, sizeof(mr));
 	mr.key = key;
-	err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
+	err = mlx5_core_destroy_mkey(dev->mdev, &mr);
 	if (err)
 		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
 }
@@ -815,7 +736,7 @@
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
+	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
 	if (err) {
 		kfree(pd);
 		return ERR_PTR(err);
@@ -824,14 +745,14 @@
 	if (context) {
 		resp.pdn = pd->pdn;
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
 			kfree(pd);
 			return ERR_PTR(-EFAULT);
 		}
 	} else {
 		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
 		if (err) {
-			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
 			kfree(pd);
 			return ERR_PTR(err);
 		}
@@ -848,7 +769,7 @@
 	if (!pd->uobject)
 		free_pa_mkey(mdev, mpd->pa_lkey);
 
-	mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
+	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
 	kfree(mpd);
 
 	return 0;
@@ -859,7 +780,7 @@
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	int err;
 
-	err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
+	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
 	if (err)
 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
 			     ibqp->qp_num, gid->raw);
@@ -872,7 +793,7 @@
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	int err;
 
-	err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
+	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
 	if (err)
 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
 			     ibqp->qp_num, gid->raw);
@@ -906,7 +827,7 @@
 	if (err)
 		goto out;
 
-	dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
+	dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -921,7 +842,7 @@
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-	return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
+	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
 }
 
 static ssize_t show_reg_pages(struct device *device,
@@ -930,7 +851,7 @@
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-	return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
+	return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
 }
 
 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -938,7 +859,7 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
+	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -946,8 +867,8 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
-		       fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
+	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -955,7 +876,7 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%x\n", dev->mdev.rev_id);
+	return sprintf(buf, "%x\n", dev->mdev->rev_id);
 }
 
 static ssize_t show_board(struct device *device, struct device_attribute *attr,
@@ -964,7 +885,7 @@
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
-		       dev->mdev.board_id);
+		       dev->mdev->board_id);
 }
 
 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
@@ -983,11 +904,12 @@
 	&dev_attr_reg_pages,
 };
 
-static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-			  void *data)
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+			  enum mlx5_dev_event event, void *data)
 {
-	struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
+	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
 	struct ib_event ibev;
+
 	u8 port = 0;
 
 	switch (event) {
@@ -1047,7 +969,7 @@
 {
 	int port;
 
-	for (port = 1; port <= dev->mdev.caps.num_ports; port++)
+	for (port = 1; port <= dev->mdev->caps.num_ports; port++)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -1072,14 +994,14 @@
 		goto out;
 	}
 
-	for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
+	for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
 		if (err) {
 			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
 			break;
 		}
-		dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
-		dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
+		dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
 			    dprops->max_pkeys, pprops->gid_tbl_len);
 	}
@@ -1328,10 +1250,8 @@
 	mlx5_ib_dealloc_pd(devr->p0);
 }
 
-static int init_one(struct pci_dev *pdev,
-		    const struct pci_device_id *id)
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_core_dev *mdev;
 	struct mlx5_ib_dev *dev;
 	int err;
 	int i;
@@ -1340,28 +1260,19 @@
 
 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
 	if (!dev)
-		return -ENOMEM;
+		return NULL;
 
-	mdev = &dev->mdev;
-	mdev->event = mlx5_ib_event;
-	if (prof_sel >= ARRAY_SIZE(profile)) {
-		pr_warn("selected pofile out of range, selceting default\n");
-		prof_sel = 0;
-	}
-	mdev->profile = &profile[prof_sel];
-	err = mlx5_dev_init(mdev, pdev);
-	if (err)
-		goto err_free;
+	dev->mdev = mdev;
 
 	err = get_port_caps(dev);
 	if (err)
-		goto err_cleanup;
+		goto err_dealloc;
 
 	get_ext_port_caps(dev);
 
 	err = alloc_comp_eqs(dev);
 	if (err)
-		goto err_cleanup;
+		goto err_dealloc;
 
 	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
@@ -1480,7 +1391,7 @@
 
 	dev->ib_active = true;
 
-	return 0;
+	return dev;
 
 err_umrc:
 	destroy_umrc_res(dev);
@@ -1494,49 +1405,39 @@
 err_eqs:
 	free_comp_eqs(dev);
 
-err_cleanup:
-	mlx5_dev_cleanup(mdev);
-
-err_free:
+err_dealloc:
 	ib_dealloc_device((struct ib_device *)dev);
 
-	return err;
+	return NULL;
 }
 
-static void remove_one(struct pci_dev *pdev)
+static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 {
-	struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
-
+	struct mlx5_ib_dev *dev = context;
 	destroy_umrc_res(dev);
 	ib_unregister_device(&dev->ib_dev);
 	destroy_dev_resources(&dev->devr);
 	free_comp_eqs(dev);
-	mlx5_dev_cleanup(&dev->mdev);
 	ib_dealloc_device(&dev->ib_dev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
-	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
-	{ 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
-
-static struct pci_driver mlx5_ib_driver = {
-	.name		= DRIVER_NAME,
-	.id_table	= mlx5_ib_pci_table,
-	.probe		= init_one,
-	.remove		= remove_one
+static struct mlx5_interface mlx5_ib_interface = {
+	.add            = mlx5_ib_add,
+	.remove         = mlx5_ib_remove,
+	.event          = mlx5_ib_event,
 };
 
 static int __init mlx5_ib_init(void)
 {
-	return pci_register_driver(&mlx5_ib_driver);
+	if (deprecated_prof_sel != 2)
+		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+
+	return mlx5_register_interface(&mlx5_ib_interface);
 }
 
 static void __exit mlx5_ib_cleanup(void)
 {
-	pci_unregister_driver(&mlx5_ib_driver);
+	mlx5_unregister_interface(&mlx5_ib_interface);
 }
 
 module_init(mlx5_ib_init);