/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "cmd.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

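/* Netdev notifier: caches the RoCE netdev on (un)register and translates
 * carrier/link changes into IB port events on port 1.
 */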
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, 1, &port_state))
				return NOTIFY_DONE;

			if (ibdev->roce.last_port_state == port_state)
				return NOTIFY_DONE;

			ibdev->roce.last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				return NOTIFY_DONE;

			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
	if (ndev)
		return ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

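/* Map the Ethernet proto_oper mask reported by the device to the closest
 * IB speed/width pair (e.g. 100GBASE-* is reported as 4X EDR).
 */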
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

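/* Fill ib_port_attr for an Ethernet (RoCE) port; port state and MTU are
 * derived from the underlying netdev (or its LAG master when bonding is
 * active).
 */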
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	int err;

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 */
	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
	if (err)
		return err;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
	return 0;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	bool vlan = false;
	u8 mac[ETH_ALEN];
	u16 vlan_id = 0;

	if (gid) {
		gid_type = attr->gid_type;
		ether_addr_copy(mac, attr->ndev->dev_addr);

		if (is_vlan_dev(attr->ndev)) {
			vlan = true;
			vlan_id = vlan_dev_vlan_id(attr->ndev);
		}
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac, vlan,
				      vlan_id);
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index)
{
	struct ib_gid_attr attr;
	union ib_gid gid;

	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
		return 0;

	if (!attr.ndev)
		return 0;

	dev_put(attr.ndev);

	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type)
{
	struct ib_gid_attr attr;
	union ib_gid gid;
	int ret;

	ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (!attr.ndev)
		return -ENODEV;

	dev_put(attr.ndev);

	*gid_type = attr.gid_type;

	return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8 bytes standard atomic operations and capable
	 * of host endianness respond
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

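/* ib_device_attr query; also fills the mlx5-specific extended response
 * (TSO, RSS, CQE compression, packet pacing, etc.) when userspace provides
 * room for it in uhw.
 */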
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP |
						MLX5_RX_HASH_INNER;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event))
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.flags = IB_TM_CAP_RC;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
						MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
						MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.cqe_comp_caps.max_num =
			MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
			MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
		resp.cqe_comp_caps.supported_format =
			MLX5_IB_CQE_RES_FORMAT_HASH |
			MLX5_IB_CQE_RES_FORMAT_CSUM;
		resp.response_length += sizeof(resp.cqe_comp_caps);
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw->outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
	}

	if (field_avail(typeof(resp), sw_parsing_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X = 1 << 0,
	MLX5_IB_WIDTH_2X = 1 << 1,
	MLX5_IB_WIDTH_4X = 1 << 2,
	MLX5_IB_WIDTH_8X = 1 << 3,
	MLX5_IB_WIDTH_12X = 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0 = 1,
	__IB_MAX_VL_0_1 = 2,
	__IB_MAX_VL_0_3 = 3,
	__IB_MAX_VL_0_7 = 4,
	__IB_MAX_VL_0_14 = 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0 = 1,
	MLX5_VL_HW_0_1 = 2,
	MLX5_VL_HW_0_2 = 3,
	MLX5_VL_HW_0_3 = 4,
	MLX5_VL_HW_0_4 = 5,
	MLX5_VL_HW_0_5 = 6,
	MLX5_VL_HW_0_6 = 7,
	MLX5_VL_HW_0_7 = 8,
	MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

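/* Fill ib_port_attr for a native IB port from the HCA vport context plus
 * the queried link width, speed, MTU and VL capabilities.
 */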
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid = rep->lid;
	props->lmc = rep->lmc;
	props->sm_lid = rep->sm_lid;
	props->sm_sl = rep->sm_sl;
	props->state = rep->vport_state;
	props->phys_state = rep->port_physical_state;
	props->port_cap_flags = rep->cap_mask1;
	props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr = rep->pkey_violation_counter;
	props->qkey_viol_cntr = rep->qkey_violation_counter;
	props->subnet_timeout = rep->subnet_timeout;
	props->init_type_reply = rep->init_type_reply;
	props->grh_required = rep->grh_required;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;

	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
		props->gid_tbl_len -= count;
	}
	return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	int err;

	err = mlx5_query_hca_vport_context(dev->mdev, 0,
					   port_num, 0, &ctx);
	if (err)
		return err;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		return -EINVAL;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
						 port_num, 0, &ctx);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	/* CM layer calls ib_modify_port() regardless of the link layer. For
	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
	 */
	if (!is_ib)
		return 0;

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

static u16 calc_dynamic_bfregs(int uars_per_sys_page)
{
	/* Large page with non 4k uar support might limit the dynamic size */
	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
		return MLX5_MIN_DYN_BFREGS;

	return MLX5_MAX_DYN_BFREGS;
}

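/* Work out how many blue-flame registers and UAR system pages a new user
 * context needs: the user's static request is rounded up to whole system
 * pages and a dynamic region is reserved on top of it.
 */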
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     struct mlx5_bfreg_info *bfregi)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	/* This holds the required static allocation asked by the user */
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, bfregi->total_num_bfregs,
		    bfregi->num_sys_pages);

	return 0;
}

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}

	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;

	return 0;

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++) {
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
			err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
			if (err) {
				mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
				return err;
			}
		}
	}

	return 0;
}

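/* Allocate a transport domain for a user context. On Ethernet ports that
 * support toggling local loopback, loopback is turned on once a second
 * user transport domain exists (and turned off again in the dealloc path
 * when the count drops below two).
 */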
static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
{
	int err;

	err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
		return err;

	mutex_lock(&dev->lb_mutex);
	dev->user_td++;

	if (dev->user_td == 2)
		err = mlx5_nic_vport_update_local_lb(dev->mdev, true);

	mutex_unlock(&dev->lb_mutex);
	return err;
}

static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
{
	mlx5_core_dealloc_transport_domain(dev->mdev, tdn);

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
		return;

	mutex_lock(&dev->lb_mutex);
	dev->user_td--;

	if (dev->user_td < 2)
		mlx5_nic_vport_update_local_lb(dev->mdev, false);

	mutex_unlock(&dev->lb_mutex);
}

static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_ib_ucontext *context;
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	bool lib_uar_4k;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.flags)
		return ERR_PTR(-EINVAL);

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return ERR_PTR(-EINVAL);

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	context->upd_xlt_page = __get_free_page(GFP_KERNEL);
	if (!context->upd_xlt_page) {
		err = -ENOMEM;
		goto out_uars;
	}
	mutex_init(&context->upd_xlt_page_mutex);

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
		err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001482 if (err)
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001483 goto out_page;
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001484 }
1485
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001486 INIT_LIST_HEAD(&context->vma_private_list);
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001487 mutex_init(&context->vma_private_list_mutex);
Eli Cohene126ba92013-07-07 17:25:49 +03001488 INIT_LIST_HEAD(&context->db_page_list);
1489 mutex_init(&context->db_page_mutex);
1490
Eli Cohen2f5ff262017-01-03 23:55:21 +02001491 resp.tot_bfregs = req.total_num_bfregs;
Saeed Mahameed938fe832015-05-28 22:28:41 +03001492 resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
Matan Barakb368d7c2015-12-15 20:30:12 +02001493
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001494 if (field_avail(typeof(resp), cqe_version, udata->outlen))
1495 resp.response_length += sizeof(resp.cqe_version);
Matan Barakb368d7c2015-12-15 20:30:12 +02001496
Bodong Wang402ca532016-06-17 15:02:20 +03001497 if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
Moni Shoua6ad279c52016-11-23 08:23:23 +02001498 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1499 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
Bodong Wang402ca532016-06-17 15:02:20 +03001500 resp.response_length += sizeof(resp.cmds_supp_uhw);
1501 }
1502
Or Gerlitz78984892016-11-30 20:33:33 +02001503 if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1504 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1505 mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1506 resp.eth_min_inline++;
1507 }
1508 resp.response_length += sizeof(resp.eth_min_inline);
1509 }
1510
Noa Osherovichbc5c6ee2016-06-04 15:15:31 +03001511 /*
1512 * We don't want to expose information from the PCI bar that is located
1513 * after 4096 bytes, so if the arch only supports larger pages, let's
1514 * pretend we don't support reading the HCA's core clock. This is also
1515 * enforced by the mmap function.
1516 */
Eli Cohende8d6e02017-01-03 23:55:19 +02001517 if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1518 if (PAGE_SIZE <= 4096) {
1519 resp.comp_mask |=
1520 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1521 resp.hca_core_clock_offset =
1522 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1523 }
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001524 resp.response_length += sizeof(resp.hca_core_clock_offset) +
Bodong Wang402ca532016-06-17 15:02:20 +03001525 sizeof(resp.reserved2);
Matan Barakb368d7c2015-12-15 20:30:12 +02001526 }
1527
Eli Cohen30aa60b2017-01-03 23:55:27 +02001528 if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1529 resp.response_length += sizeof(resp.log_uar_size);
1530
1531 if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1532 resp.response_length += sizeof(resp.num_uars_per_page);
1533
Yishai Hadas31a78a52017-12-24 16:31:34 +02001534 if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1535 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1536 resp.response_length += sizeof(resp.num_dyn_bfregs);
1537 }
1538
Matan Barakb368d7c2015-12-15 20:30:12 +02001539 err = ib_copy_to_udata(udata, &resp, resp.response_length);
Eli Cohene126ba92013-07-07 17:25:49 +03001540 if (err)
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001541 goto out_td;
Eli Cohene126ba92013-07-07 17:25:49 +03001542
Eli Cohen2f5ff262017-01-03 23:55:21 +02001543 bfregi->ver = ver;
1544 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001545 context->cqe_version = resp.cqe_version;
Eli Cohen30aa60b2017-01-03 23:55:27 +02001546 context->lib_caps = req.lib_caps;
1547 print_lib_caps(dev, context->lib_caps);
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001548
Eli Cohene126ba92013-07-07 17:25:49 +03001549 return &context->ibucontext;
1550
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001551out_td:
1552 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
Huy Nguyenc85023e2017-05-30 09:42:54 +03001553 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001554
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001555out_page:
1556 free_page(context->upd_xlt_page);
1557
Eli Cohene126ba92013-07-07 17:25:49 +03001558out_uars:
Eli Cohenb037c292017-01-03 23:55:26 +02001559 deallocate_uars(dev, context);
1560
1561out_sys_pages:
1562 kfree(bfregi->sys_pages);
1563
Eli Cohene126ba92013-07-07 17:25:49 +03001564out_count:
Eli Cohen2f5ff262017-01-03 23:55:21 +02001565 kfree(bfregi->count);
Eli Cohene126ba92013-07-07 17:25:49 +03001566
Eli Cohene126ba92013-07-07 17:25:49 +03001567out_ctx:
1568 kfree(context);
Eli Cohenb037c292017-01-03 23:55:26 +02001569
Eli Cohene126ba92013-07-07 17:25:49 +03001570 return ERR_PTR(err);
1571}
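/*
 * Editorial note on the response handling in mlx5_ib_alloc_ucontext():
 * resp.response_length starts at the mandatory prefix (capped by
 * udata->outlen) and grows only as each optional field is confirmed to
 * fit via field_avail(), so older userspace receives a shorter but
 * still valid response rather than an overflowed one.
 */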
1572
1573static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1574{
1575 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1576 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
Eli Cohenb037c292017-01-03 23:55:26 +02001577 struct mlx5_bfreg_info *bfregi;
Eli Cohene126ba92013-07-07 17:25:49 +03001578
Eli Cohenb037c292017-01-03 23:55:26 +02001579 bfregi = &context->bfregi;
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001580 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
Huy Nguyenc85023e2017-05-30 09:42:54 +03001581 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001582
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001583 free_page(context->upd_xlt_page);
Eli Cohenb037c292017-01-03 23:55:26 +02001584 deallocate_uars(dev, context);
1585 kfree(bfregi->sys_pages);
Eli Cohen2f5ff262017-01-03 23:55:21 +02001586 kfree(bfregi->count);
Eli Cohene126ba92013-07-07 17:25:49 +03001587 kfree(context);
1588
1589 return 0;
1590}
1591
Eli Cohenb037c292017-01-03 23:55:26 +02001592static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001593 int uar_idx)
Eli Cohene126ba92013-07-07 17:25:49 +03001594{
Eli Cohenb037c292017-01-03 23:55:26 +02001595 int fw_uars_per_page;
1596
1597 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
1598
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001599 return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
Eli Cohene126ba92013-07-07 17:25:49 +03001600}
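/*
 * Illustrative reading of uar_index2pfn() (an editorial sketch): when
 * the device runs with 4K UARs on a larger system page size, several
 * firmware UAR indexes share one system page, so the index is divided
 * by fw_uars_per_page before being added to the pfn of BAR 0. Without
 * 4K UARs each index gets its own system page.
 */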
1601
1602static int get_command(unsigned long offset)
1603{
1604 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
1605}
1606
1607static int get_arg(unsigned long offset)
1608{
1609 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
1610}
1611
1612static int get_index(unsigned long offset)
1613{
1614 return get_arg(offset);
1615}
1616
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001617/* The index spills into an extra byte to allow values larger than 255 */
1618static int get_extended_index(unsigned long offset)
1619{
1620 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
1621}
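/*
 * Sketch of the mmap offset layout decoded by the helpers above,
 * assuming MLX5_IB_MMAP_CMD_SHIFT is 8 (editorial assumption):
 *
 *   bits  0..7   low byte of the argument/index (get_arg/get_index)
 *   bits  8..15  command (get_command)
 *   bits 16..23  high byte of the index (get_extended_index only)
 *
 * For example, an offset of 0x050102 decodes to command 1 and extended
 * index 0x0502.
 */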
1622
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001623static void mlx5_ib_vma_open(struct vm_area_struct *area)
1624{
1625 /* vma_open is called when a new VMA is created on top of our VMA. This
1626 * happens through either the mremap flow or split_vma (usually due to
1627 * mlock, madvise, munmap, etc.). We do not support cloning the VMA,
1628 * as this VMA is strongly hardware related. Therefore we set the
1629 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1630 * calling us again and performing incorrect actions. We assume that
1631 * the original VMA is exactly a single page, so no "splitting"
1632 * operation will happen to it.
1633 */
1634 area->vm_ops = NULL;
1635}
1636
1637static void mlx5_ib_vma_close(struct vm_area_struct *area)
1638{
1639 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1640
1641 /* It's guaranteed that all VMAs opened on a FD are closed before the
1642 * file itself is closed, so no synchronization is needed with the
1643 * regular closing flow (e.g. mlx5 ib_dealloc_ucontext).
1644 * However, we do need to synchronize with accesses to the vma made by
1645 * mlx5_ib_disassociate_ucontext.
1646 * The close operation is usually called under mm->mmap_sem, except when
1647 * the process is exiting.
1648 * The exiting case is handled explicitly as part of
1649 * mlx5_ib_disassociate_ucontext.
1650 */
1651 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1652
1653 /* Set the vma context pointer to NULL in the mlx5_ib driver's
1654 * private data to protect against a race with
1655 * mlx5_ib_disassociate_ucontext().
1656 */
1657 mlx5_ib_vma_priv_data->vma = NULL;
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001658 mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001659 list_del(&mlx5_ib_vma_priv_data->list);
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001660 mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001661 kfree(mlx5_ib_vma_priv_data);
1662}
1663
1664static const struct vm_operations_struct mlx5_ib_vm_ops = {
1665 .open = mlx5_ib_vma_open,
1666 .close = mlx5_ib_vma_close
1667};
1668
1669static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1670 struct mlx5_ib_ucontext *ctx)
1671{
1672 struct mlx5_ib_vma_private_data *vma_prv;
1673 struct list_head *vma_head = &ctx->vma_private_list;
1674
1675 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1676 if (!vma_prv)
1677 return -ENOMEM;
1678
1679 vma_prv->vma = vma;
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001680 vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001681 vma->vm_private_data = vma_prv;
1682 vma->vm_ops = &mlx5_ib_vm_ops;
1683
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001684 mutex_lock(&ctx->vma_private_list_mutex);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001685 list_add(&vma_prv->list, vma_head);
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001686 mutex_unlock(&ctx->vma_private_list_mutex);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001687
1688 return 0;
1689}
1690
1691static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1692{
1693 int ret;
1694 struct vm_area_struct *vma;
1695 struct mlx5_ib_vma_private_data *vma_private, *n;
1696 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1697 struct task_struct *owning_process = NULL;
1698 struct mm_struct *owning_mm = NULL;
1699
1700 owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
1701 if (!owning_process)
1702 return;
1703
1704 owning_mm = get_task_mm(owning_process);
1705 if (!owning_mm) {
1706 pr_info("no mm, disassociate ucontext is pending task termination\n");
1707 while (1) {
1708 put_task_struct(owning_process);
1709 usleep_range(1000, 2000);
1710 owning_process = get_pid_task(ibcontext->tgid,
1711 PIDTYPE_PID);
1712 if (!owning_process ||
1713 owning_process->state == TASK_DEAD) {
1714 pr_info("disassociate ucontext done, task was terminated\n");
1715 /* in case task was dead need to release the
1716 * task struct.
1717 */
1718 if (owning_process)
1719 put_task_struct(owning_process);
1720 return;
1721 }
1722 }
1723 }
1724
1725 /* We need to protect against a race with the vma being closed as
1726 * part of mlx5_ib_vma_close().
1727 */
Maor Gottliebecc7d832017-03-29 06:03:02 +03001728 down_write(&owning_mm->mmap_sem);
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001729 mutex_lock(&context->vma_private_list_mutex);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001730 list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
1731 list) {
1732 vma = vma_private->vma;
1733 ret = zap_vma_ptes(vma, vma->vm_start,
1734 PAGE_SIZE);
1735 WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
1736 /* The context is going to be destroyed, so the
1737 * vm_ops must not be accessed any more.
1738 */
Maor Gottlieb13776612017-03-29 06:03:03 +03001739 vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001740 vma->vm_ops = NULL;
1741 list_del(&vma_private->list);
1742 kfree(vma_private);
1743 }
Majd Dibbinyad9a3662017-12-24 13:54:56 +02001744 mutex_unlock(&context->vma_private_list_mutex);
Maor Gottliebecc7d832017-03-29 06:03:02 +03001745 up_write(&owning_mm->mmap_sem);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001746 mmput(owning_mm);
1747 put_task_struct(owning_process);
1748}
1749
Guy Levi37aa5c32016-04-27 16:49:50 +03001750static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1751{
1752 switch (cmd) {
1753 case MLX5_IB_MMAP_WC_PAGE:
1754 return "WC";
1755 case MLX5_IB_MMAP_REGULAR_PAGE:
1756 return "best effort WC";
1757 case MLX5_IB_MMAP_NC_PAGE:
1758 return "NC";
1759 default:
1760 return NULL;
1761 }
1762}
1763
1764static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001765 struct vm_area_struct *vma,
1766 struct mlx5_ib_ucontext *context)
Guy Levi37aa5c32016-04-27 16:49:50 +03001767{
Eli Cohen2f5ff262017-01-03 23:55:21 +02001768 struct mlx5_bfreg_info *bfregi = &context->bfregi;
Guy Levi37aa5c32016-04-27 16:49:50 +03001769 int err;
1770 unsigned long idx;
1771 phys_addr_t pfn, pa;
1772 pgprot_t prot;
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001773 u32 bfreg_dyn_idx = 0;
1774 u32 uar_index;
1775 int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
1776 int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
1777 bfregi->num_static_sys_pages;
Eli Cohenb037c292017-01-03 23:55:26 +02001778
1779 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1780 return -EINVAL;
1781
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001782 if (dyn_uar)
1783 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
1784 else
1785 idx = get_index(vma->vm_pgoff);
1786
1787 if (idx >= max_valid_idx) {
1788 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
1789 idx, max_valid_idx);
Eli Cohenb037c292017-01-03 23:55:26 +02001790 return -EINVAL;
1791 }
Guy Levi37aa5c32016-04-27 16:49:50 +03001792
1793 switch (cmd) {
1794 case MLX5_IB_MMAP_WC_PAGE:
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001795 case MLX5_IB_MMAP_ALLOC_WC:
Guy Levi37aa5c32016-04-27 16:49:50 +03001796/* Some architectures don't support WC memory */
1797#if defined(CONFIG_X86)
1798 if (!pat_enabled())
1799 return -EPERM;
1800#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
1801 return -EPERM;
1802#endif
1803 /* fall through */
1804 case MLX5_IB_MMAP_REGULAR_PAGE:
1805 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
1806 prot = pgprot_writecombine(vma->vm_page_prot);
1807 break;
1808 case MLX5_IB_MMAP_NC_PAGE:
1809 prot = pgprot_noncached(vma->vm_page_prot);
1810 break;
1811 default:
1812 return -EINVAL;
1813 }
1814
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001815 if (dyn_uar) {
1816 int uars_per_page;
1817
1818 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
1819 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
1820 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
1821 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
1822 bfreg_dyn_idx, bfregi->total_num_bfregs);
1823 return -EINVAL;
1824 }
1825
1826 mutex_lock(&bfregi->lock);
1827 /* Fail if the UAR is already allocated; the first bfreg
1828 * index of each page holds its count.
1829 */
1830 if (bfregi->count[bfreg_dyn_idx]) {
1831 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
1832 mutex_unlock(&bfregi->lock);
1833 return -EINVAL;
1834 }
1835
1836 bfregi->count[bfreg_dyn_idx]++;
1837 mutex_unlock(&bfregi->lock);
1838
1839 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
1840 if (err) {
1841 mlx5_ib_warn(dev, "UAR alloc failed\n");
1842 goto free_bfreg;
1843 }
1844 } else {
1845 uar_index = bfregi->sys_pages[idx];
1846 }
1847
1848 pfn = uar_index2pfn(dev, uar_index);
Guy Levi37aa5c32016-04-27 16:49:50 +03001849 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
1850
1851 vma->vm_page_prot = prot;
1852 err = io_remap_pfn_range(vma, vma->vm_start, pfn,
1853 PAGE_SIZE, vma->vm_page_prot);
1854 if (err) {
1855 mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
1856 err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001857 err = -EAGAIN;
1858 goto err;
Guy Levi37aa5c32016-04-27 16:49:50 +03001859 }
1860
1861 pa = pfn << PAGE_SHIFT;
1862 mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
1863 vma->vm_start, &pa);
1864
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001865 err = mlx5_ib_set_vma_data(vma, context);
1866 if (err)
1867 goto err;
1868
1869 if (dyn_uar)
1870 bfregi->sys_pages[idx] = uar_index;
1871 return 0;
1872
1873err:
1874 if (!dyn_uar)
1875 return err;
1876
1877 mlx5_cmd_free_uar(dev->mdev, idx);
1878
1879free_bfreg:
1880 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
1881
1882 return err;
Guy Levi37aa5c32016-04-27 16:49:50 +03001883}
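/*
 * Editorial note on the MLX5_IB_MMAP_ALLOC_WC path in uar_mmap(): the
 * bfreg count for the dynamic index is claimed first, under
 * bfregi->lock, so a concurrent mmap of the same index fails early;
 * only then is a UAR allocated from firmware. Later failures unwind in
 * reverse order through the err and free_bfreg labels.
 */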
1884
Eli Cohene126ba92013-07-07 17:25:49 +03001885static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
1886{
1887 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1888 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
Eli Cohene126ba92013-07-07 17:25:49 +03001889 unsigned long command;
Eli Cohene126ba92013-07-07 17:25:49 +03001890 phys_addr_t pfn;
1891
1892 command = get_command(vma->vm_pgoff);
1893 switch (command) {
Guy Levi37aa5c32016-04-27 16:49:50 +03001894 case MLX5_IB_MMAP_WC_PAGE:
1895 case MLX5_IB_MMAP_NC_PAGE:
Eli Cohene126ba92013-07-07 17:25:49 +03001896 case MLX5_IB_MMAP_REGULAR_PAGE:
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001897 case MLX5_IB_MMAP_ALLOC_WC:
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001898 return uar_mmap(dev, command, vma, context);
Eli Cohene126ba92013-07-07 17:25:49 +03001899
1900 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
1901 return -ENOSYS;
1902
Matan Barakd69e3bc2015-12-15 20:30:13 +02001903 case MLX5_IB_MMAP_CORE_CLOCK:
Matan Barakd69e3bc2015-12-15 20:30:13 +02001904 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1905 return -EINVAL;
1906
Matan Barak6cbac1e2016-04-14 16:52:10 +03001907 if (vma->vm_flags & VM_WRITE)
Matan Barakd69e3bc2015-12-15 20:30:13 +02001908 return -EPERM;
1909
1910 /* Don't expose information to user space that it shouldn't have */
1911 if (PAGE_SIZE > 4096)
1912 return -EOPNOTSUPP;
1913
1914 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1915 pfn = (dev->mdev->iseg_base +
1916 offsetof(struct mlx5_init_seg, internal_timer_h)) >>
1917 PAGE_SHIFT;
1918 if (io_remap_pfn_range(vma, vma->vm_start, pfn,
1919 PAGE_SIZE, vma->vm_page_prot))
1920 return -EAGAIN;
1921
1922 mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
1923 vma->vm_start,
1924 (unsigned long long)pfn << PAGE_SHIFT);
1925 break;
Matan Barakd69e3bc2015-12-15 20:30:13 +02001926
Eli Cohene126ba92013-07-07 17:25:49 +03001927 default:
1928 return -EINVAL;
1929 }
1930
1931 return 0;
1932}
1933
Eli Cohene126ba92013-07-07 17:25:49 +03001934static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
1935 struct ib_ucontext *context,
1936 struct ib_udata *udata)
1937{
1938 struct mlx5_ib_alloc_pd_resp resp;
1939 struct mlx5_ib_pd *pd;
1940 int err;
1941
1942 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1943 if (!pd)
1944 return ERR_PTR(-ENOMEM);
1945
Jack Morgenstein9603b612014-07-28 23:30:22 +03001946 err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
Eli Cohene126ba92013-07-07 17:25:49 +03001947 if (err) {
1948 kfree(pd);
1949 return ERR_PTR(err);
1950 }
1951
1952 if (context) {
1953 resp.pdn = pd->pdn;
1954 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
Jack Morgenstein9603b612014-07-28 23:30:22 +03001955 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
Eli Cohene126ba92013-07-07 17:25:49 +03001956 kfree(pd);
1957 return ERR_PTR(-EFAULT);
1958 }
Eli Cohene126ba92013-07-07 17:25:49 +03001959 }
1960
1961 return &pd->ibpd;
1962}
1963
1964static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
1965{
1966 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
1967 struct mlx5_ib_pd *mpd = to_mpd(pd);
1968
Jack Morgenstein9603b612014-07-28 23:30:22 +03001969 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
Eli Cohene126ba92013-07-07 17:25:49 +03001970 kfree(mpd);
1971
1972 return 0;
1973}
1974
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03001975enum {
1976 MATCH_CRITERIA_ENABLE_OUTER_BIT,
1977 MATCH_CRITERIA_ENABLE_MISC_BIT,
1978 MATCH_CRITERIA_ENABLE_INNER_BIT
1979};
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02001980
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03001981#define HEADER_IS_ZERO(match_criteria, headers) \
1982 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1983 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
1984
1985static u8 get_match_criteria_enable(u32 *match_criteria)
1986{
1987 u8 match_criteria_enable;
1988
1989 match_criteria_enable =
1990 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1991 MATCH_CRITERIA_ENABLE_OUTER_BIT;
1992 match_criteria_enable |=
1993 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1994 MATCH_CRITERIA_ENABLE_MISC_BIT;
1995 match_criteria_enable |=
1996 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1997 MATCH_CRITERIA_ENABLE_INNER_BIT;
1998
1999 return match_criteria_enable;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002000}
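/*
 * Editorial summary of get_match_criteria_enable(): each bit reports
 * whether the corresponding section of the match criteria (outer
 * headers, misc parameters, inner headers) carries a non-zero mask, so
 * the returned bitmap tells the device which sections to match on.
 */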
2001
Maor Gottliebca0d4752016-08-30 16:58:35 +03002002static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2003{
2004 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2005 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2006}
2007
Moses Reuben2d1e6972016-11-14 19:04:52 +02002008static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
2009 bool inner)
2010{
2011 if (inner) {
2012 MLX5_SET(fte_match_set_misc,
2013 misc_c, inner_ipv6_flow_label, mask);
2014 MLX5_SET(fte_match_set_misc,
2015 misc_v, inner_ipv6_flow_label, val);
2016 } else {
2017 MLX5_SET(fte_match_set_misc,
2018 misc_c, outer_ipv6_flow_label, mask);
2019 MLX5_SET(fte_match_set_misc,
2020 misc_v, outer_ipv6_flow_label, val);
2021 }
2022}
2023
Maor Gottliebca0d4752016-08-30 16:58:35 +03002024static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2025{
2026 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2027 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2028 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2029 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2030}
2031
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002032#define LAST_ETH_FIELD vlan_tag
2033#define LAST_IB_FIELD sl
Maor Gottliebca0d4752016-08-30 16:58:35 +03002034#define LAST_IPV4_FIELD tos
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002035#define LAST_IPV6_FIELD traffic_class
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002036#define LAST_TCP_UDP_FIELD src_port
Moses Reubenffb30d82016-11-14 19:04:50 +02002037#define LAST_TUNNEL_FIELD tunnel_id
Moses Reuben2ac693f2017-01-18 14:59:50 +02002038#define LAST_FLOW_TAG_FIELD tag_id
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002039#define LAST_DROP_FIELD size
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002040
2041/* @field is the last field the driver supports in the given spec */
2042#define FIELDS_NOT_SUPPORTED(filter, field)\
2043 memchr_inv((void *)&filter.field +\
2044 sizeof(filter.field), 0,\
2045 sizeof(filter) -\
2046 offsetof(typeof(filter), field) -\
2047 sizeof(filter.field))
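/*
 * FIELDS_NOT_SUPPORTED() is non-zero when any byte of the user mask
 * after the last field this driver handles is set, using memchr_inv()
 * on the tail of the structure. An illustrative use, mirroring the
 * calls below:
 *
 *	if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
 *		return -EOPNOTSUPP;
 *
 * which rejects an ETH spec whose mask sets anything beyond vlan_tag.
 */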
2048
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002049#define IPV4_VERSION 4
2050#define IPV6_VERSION 6
2051static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2052 u32 *match_v, const union ib_flow_spec *ib_spec,
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002053 u32 *tag_id, bool *is_drop)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002054{
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002055 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2056 misc_parameters);
2057 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2058 misc_parameters);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002059 void *headers_c;
2060 void *headers_v;
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002061 int match_ipv;
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002062
Moses Reuben2d1e6972016-11-14 19:04:52 +02002063 if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2064 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2065 inner_headers);
2066 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2067 inner_headers);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002068 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2069 ft_field_support.inner_ip_version);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002070 } else {
2071 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2072 outer_headers);
2073 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2074 outer_headers);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002075 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2076 ft_field_support.outer_ip_version);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002077 }
2078
2079 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002080 case IB_FLOW_SPEC_ETH:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002081 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002082 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002083
Moses Reuben2d1e6972016-11-14 19:04:52 +02002084 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002085 dmac_47_16),
2086 ib_spec->eth.mask.dst_mac);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002087 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002088 dmac_47_16),
2089 ib_spec->eth.val.dst_mac);
2090
Moses Reuben2d1e6972016-11-14 19:04:52 +02002091 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottliebee3da802016-09-12 19:16:24 +03002092 smac_47_16),
2093 ib_spec->eth.mask.src_mac);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002094 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottliebee3da802016-09-12 19:16:24 +03002095 smac_47_16),
2096 ib_spec->eth.val.src_mac);
2097
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002098 if (ib_spec->eth.mask.vlan_tag) {
Moses Reuben2d1e6972016-11-14 19:04:52 +02002099 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Mohamad Haj Yahia10543362016-10-09 16:25:43 +03002100 cvlan_tag, 1);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002101 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Mohamad Haj Yahia10543362016-10-09 16:25:43 +03002102 cvlan_tag, 1);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002103
Moses Reuben2d1e6972016-11-14 19:04:52 +02002104 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002105 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002106 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002107 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2108
Moses Reuben2d1e6972016-11-14 19:04:52 +02002109 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002110 first_cfi,
2111 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002112 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002113 first_cfi,
2114 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2115
Moses Reuben2d1e6972016-11-14 19:04:52 +02002116 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002117 first_prio,
2118 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002119 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002120 first_prio,
2121 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2122 }
Moses Reuben2d1e6972016-11-14 19:04:52 +02002123 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002124 ethertype, ntohs(ib_spec->eth.mask.ether_type));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002125 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002126 ethertype, ntohs(ib_spec->eth.val.ether_type));
2127 break;
2128 case IB_FLOW_SPEC_IPV4:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002129 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002130 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002131
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002132 if (match_ipv) {
2133 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2134 ip_version, 0xf);
2135 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2136 ip_version, IPV4_VERSION);
2137 } else {
2138 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2139 ethertype, 0xffff);
2140 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2141 ethertype, ETH_P_IP);
2142 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002143
Moses Reuben2d1e6972016-11-14 19:04:52 +02002144 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002145 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2146 &ib_spec->ipv4.mask.src_ip,
2147 sizeof(ib_spec->ipv4.mask.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002148 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002149 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2150 &ib_spec->ipv4.val.src_ip,
2151 sizeof(ib_spec->ipv4.val.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002152 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002153 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2154 &ib_spec->ipv4.mask.dst_ip,
2155 sizeof(ib_spec->ipv4.mask.dst_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002156 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002157 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2158 &ib_spec->ipv4.val.dst_ip,
2159 sizeof(ib_spec->ipv4.val.dst_ip));
Maor Gottliebca0d4752016-08-30 16:58:35 +03002160
Moses Reuben2d1e6972016-11-14 19:04:52 +02002161 set_tos(headers_c, headers_v,
Maor Gottliebca0d4752016-08-30 16:58:35 +03002162 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2163
Moses Reuben2d1e6972016-11-14 19:04:52 +02002164 set_proto(headers_c, headers_v,
Maor Gottliebca0d4752016-08-30 16:58:35 +03002165 ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002166 break;
Maor Gottlieb026bae02016-06-17 15:14:51 +03002167 case IB_FLOW_SPEC_IPV6:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002168 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002169 return -EOPNOTSUPP;
Maor Gottlieb026bae02016-06-17 15:14:51 +03002170
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002171 if (match_ipv) {
2172 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2173 ip_version, 0xf);
2174 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2175 ip_version, IPV6_VERSION);
2176 } else {
2177 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2178 ethertype, 0xffff);
2179 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2180 ethertype, ETH_P_IPV6);
2181 }
Maor Gottlieb026bae02016-06-17 15:14:51 +03002182
Moses Reuben2d1e6972016-11-14 19:04:52 +02002183 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002184 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2185 &ib_spec->ipv6.mask.src_ip,
2186 sizeof(ib_spec->ipv6.mask.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002187 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002188 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2189 &ib_spec->ipv6.val.src_ip,
2190 sizeof(ib_spec->ipv6.val.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002191 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002192 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2193 &ib_spec->ipv6.mask.dst_ip,
2194 sizeof(ib_spec->ipv6.mask.dst_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002195 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002196 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2197 &ib_spec->ipv6.val.dst_ip,
2198 sizeof(ib_spec->ipv6.val.dst_ip));
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002199
Moses Reuben2d1e6972016-11-14 19:04:52 +02002200 set_tos(headers_c, headers_v,
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002201 ib_spec->ipv6.mask.traffic_class,
2202 ib_spec->ipv6.val.traffic_class);
2203
Moses Reuben2d1e6972016-11-14 19:04:52 +02002204 set_proto(headers_c, headers_v,
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002205 ib_spec->ipv6.mask.next_hdr,
2206 ib_spec->ipv6.val.next_hdr);
2207
Moses Reuben2d1e6972016-11-14 19:04:52 +02002208 set_flow_label(misc_params_c, misc_params_v,
2209 ntohl(ib_spec->ipv6.mask.flow_label),
2210 ntohl(ib_spec->ipv6.val.flow_label),
2211 ib_spec->type & IB_FLOW_SPEC_INNER);
2212
Maor Gottlieb026bae02016-06-17 15:14:51 +03002213 break;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002214 case IB_FLOW_SPEC_TCP:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002215 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2216 LAST_TCP_UDP_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002217 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002218
Moses Reuben2d1e6972016-11-14 19:04:52 +02002219 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002220 0xff);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002221 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002222 IPPROTO_TCP);
2223
Moses Reuben2d1e6972016-11-14 19:04:52 +02002224 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002225 ntohs(ib_spec->tcp_udp.mask.src_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002226 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002227 ntohs(ib_spec->tcp_udp.val.src_port));
2228
Moses Reuben2d1e6972016-11-14 19:04:52 +02002229 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002230 ntohs(ib_spec->tcp_udp.mask.dst_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002231 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002232 ntohs(ib_spec->tcp_udp.val.dst_port));
2233 break;
2234 case IB_FLOW_SPEC_UDP:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002235 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2236 LAST_TCP_UDP_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002237 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002238
Moses Reuben2d1e6972016-11-14 19:04:52 +02002239 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002240 0xff);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002241 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002242 IPPROTO_UDP);
2243
Moses Reuben2d1e6972016-11-14 19:04:52 +02002244 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002245 ntohs(ib_spec->tcp_udp.mask.src_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002246 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002247 ntohs(ib_spec->tcp_udp.val.src_port));
2248
Moses Reuben2d1e6972016-11-14 19:04:52 +02002249 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002250 ntohs(ib_spec->tcp_udp.mask.dst_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002251 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002252 ntohs(ib_spec->tcp_udp.val.dst_port));
2253 break;
Moses Reubenffb30d82016-11-14 19:04:50 +02002254 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2255 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2256 LAST_TUNNEL_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002257 return -EOPNOTSUPP;
Moses Reubenffb30d82016-11-14 19:04:50 +02002258
2259 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2260 ntohl(ib_spec->tunnel.mask.tunnel_id));
2261 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2262 ntohl(ib_spec->tunnel.val.tunnel_id));
2263 break;
Moses Reuben2ac693f2017-01-18 14:59:50 +02002264 case IB_FLOW_SPEC_ACTION_TAG:
2265 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2266 LAST_FLOW_TAG_FIELD))
2267 return -EOPNOTSUPP;
2268 if (ib_spec->flow_tag.tag_id >= BIT(24))
2269 return -EINVAL;
2270
2271 *tag_id = ib_spec->flow_tag.tag_id;
2272 break;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002273 case IB_FLOW_SPEC_ACTION_DROP:
2274 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2275 LAST_DROP_FIELD))
2276 return -EOPNOTSUPP;
2277 *is_drop = true;
2278 break;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002279 default:
2280 return -EINVAL;
2281 }
2282
2283 return 0;
2284}
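/*
 * Editorial summary of parse_flow_attr(): it translates a single
 * ib_flow_spec into the device match_criteria/match_value pair and
 * reports two side effects through out parameters: a flow tag
 * (IB_FLOW_SPEC_ACTION_TAG) and a drop request
 * (IB_FLOW_SPEC_ACTION_DROP). Mask bits beyond the supported fields
 * cause -EOPNOTSUPP instead of being silently ignored.
 */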
2285
2286/* If a flow could match both multicast and unicast packets,
2287 * it must not fall into the multicast flow steering table, where it
2288 * could steal other multicast packets.
2289 */
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002290static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002291{
Yishai Hadas81e30882017-06-08 16:15:09 +03002292 union ib_flow_spec *flow_spec;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002293
2294 if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002295 ib_attr->num_of_specs < 1)
2296 return false;
2297
Yishai Hadas81e30882017-06-08 16:15:09 +03002298 flow_spec = (union ib_flow_spec *)(ib_attr + 1);
2299 if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
2300 struct ib_flow_spec_ipv4 *ipv4_spec;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002301
Yishai Hadas81e30882017-06-08 16:15:09 +03002302 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
2303 if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
2304 return true;
2305
2306 return false;
2307 }
2308
2309 if (flow_spec->type == IB_FLOW_SPEC_ETH) {
2310 struct ib_flow_spec_eth *eth_spec;
2311
2312 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2313 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2314 is_multicast_ether_addr(eth_spec->val.dst_mac);
2315 }
2316
2317 return false;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002318}
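/*
 * Editorial note: a flow counts as multicast-only when its first spec
 * pins the destination to a multicast address, either an IPv4 spec
 * whose dst_ip is multicast or an ETH spec whose dst_mac is multicast
 * in both mask and value.
 */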
2319
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002320static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2321 const struct ib_flow_attr *flow_attr,
Ariel Levkovich0f750962017-04-03 13:11:02 +03002322 bool check_inner)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002323{
2324 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002325 int match_ipv = check_inner ?
2326 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2327 ft_field_support.inner_ip_version) :
2328 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2329 ft_field_support.outer_ip_version);
Ariel Levkovich0f750962017-04-03 13:11:02 +03002330 int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
2331 bool ipv4_spec_valid, ipv6_spec_valid;
2332 unsigned int ip_spec_type = 0;
2333 bool has_ethertype = false;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002334 unsigned int spec_index;
Ariel Levkovich0f750962017-04-03 13:11:02 +03002335 bool mask_valid = true;
2336 u16 eth_type = 0;
2337 bool type_valid;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002338
2339 /* Validate that ethertype is correct */
2340 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
Ariel Levkovich0f750962017-04-03 13:11:02 +03002341 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002342 ib_spec->eth.mask.ether_type) {
Ariel Levkovich0f750962017-04-03 13:11:02 +03002343 mask_valid = (ib_spec->eth.mask.ether_type ==
2344 htons(0xffff));
2345 has_ethertype = true;
2346 eth_type = ntohs(ib_spec->eth.val.ether_type);
2347 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
2348 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
2349 ip_spec_type = ib_spec->type;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002350 }
2351 ib_spec = (void *)ib_spec + ib_spec->size;
2352 }
Ariel Levkovich0f750962017-04-03 13:11:02 +03002353
2354 type_valid = (!has_ethertype) || (!ip_spec_type);
2355 if (!type_valid && mask_valid) {
2356 ipv4_spec_valid = (eth_type == ETH_P_IP) &&
2357 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
2358 ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
2359 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002360
2361 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
2362 (((eth_type == ETH_P_MPLS_UC) ||
2363 (eth_type == ETH_P_MPLS_MC)) && match_ipv);
Ariel Levkovich0f750962017-04-03 13:11:02 +03002364 }
2365
2366 return type_valid;
2367}
2368
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002369static bool is_valid_attr(struct mlx5_core_dev *mdev,
2370 const struct ib_flow_attr *flow_attr)
Ariel Levkovich0f750962017-04-03 13:11:02 +03002371{
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002372 return is_valid_ethertype(mdev, flow_attr, false) &&
2373 is_valid_ethertype(mdev, flow_attr, true);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002374}
2375
2376static void put_flow_table(struct mlx5_ib_dev *dev,
2377 struct mlx5_ib_flow_prio *prio, bool ft_added)
2378{
2379 prio->refcount -= !!ft_added;
2380 if (!prio->refcount) {
2381 mlx5_destroy_flow_table(prio->flow_table);
2382 prio->flow_table = NULL;
2383 }
2384}
2385
2386static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2387{
2388 struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
2389 struct mlx5_ib_flow_handler *handler = container_of(flow_id,
2390 struct mlx5_ib_flow_handler,
2391 ibflow);
2392 struct mlx5_ib_flow_handler *iter, *tmp;
2393
2394 mutex_lock(&dev->flow_db.lock);
2395
2396 list_for_each_entry_safe(iter, tmp, &handler->list, list) {
Mark Bloch74491de2016-08-31 11:24:25 +00002397 mlx5_del_flow_rules(iter->rule);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002398 put_flow_table(dev, iter->prio, true);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002399 list_del(&iter->list);
2400 kfree(iter);
2401 }
2402
Mark Bloch74491de2016-08-31 11:24:25 +00002403 mlx5_del_flow_rules(handler->rule);
Maor Gottlieb5497adc2016-08-28 14:16:31 +03002404 put_flow_table(dev, handler->prio, true);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002405 mutex_unlock(&dev->flow_db.lock);
2406
2407 kfree(handler);
2408
2409 return 0;
2410}
2411
Maor Gottlieb35d190112016-03-07 18:51:47 +02002412static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
2413{
2414 priority *= 2;
2415 if (!dont_trap)
2416 priority++;
2417 return priority;
2418}
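/*
 * Illustrative mapping for ib_prio_to_core_prio() (editorial reading):
 * each IB priority occupies two core priorities, with the "don't trap"
 * variant taking the even slot. For example, IB priority 3 becomes core
 * priority 6 with dont_trap set and 7 without it.
 */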
2419
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002420enum flow_table_type {
2421 MLX5_IB_FT_RX,
2422 MLX5_IB_FT_TX
2423};
2424
Maor Gottlieb00b7c2a2017-03-29 06:09:01 +03002425#define MLX5_FS_MAX_TYPES 6
2426#define MLX5_FS_MAX_ENTRIES BIT(16)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002427static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002428 struct ib_flow_attr *flow_attr,
2429 enum flow_table_type ft_type)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002430{
Maor Gottlieb35d190112016-03-07 18:51:47 +02002431 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002432 struct mlx5_flow_namespace *ns = NULL;
2433 struct mlx5_ib_flow_prio *prio;
2434 struct mlx5_flow_table *ft;
Maor Gottliebdac388e2017-03-29 06:09:00 +03002435 int max_table_size;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002436 int num_entries;
2437 int num_groups;
2438 int priority;
2439 int err = 0;
2440
Maor Gottliebdac388e2017-03-29 06:09:00 +03002441 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2442 log_max_ft_size));
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002443 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Maor Gottlieb35d190112016-03-07 18:51:47 +02002444 if (flow_is_multicast_only(flow_attr) &&
2445 !dont_trap)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002446 priority = MLX5_IB_FLOW_MCAST_PRIO;
2447 else
Maor Gottlieb35d190112016-03-07 18:51:47 +02002448 priority = ib_prio_to_core_prio(flow_attr->priority,
2449 dont_trap);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002450 ns = mlx5_get_flow_namespace(dev->mdev,
2451 MLX5_FLOW_NAMESPACE_BYPASS);
2452 num_entries = MLX5_FS_MAX_ENTRIES;
2453 num_groups = MLX5_FS_MAX_TYPES;
2454 prio = &dev->flow_db.prios[priority];
2455 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2456 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2457 ns = mlx5_get_flow_namespace(dev->mdev,
2458 MLX5_FLOW_NAMESPACE_LEFTOVERS);
2459 build_leftovers_ft_param(&priority,
2460 &num_entries,
2461 &num_groups);
2462 prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002463 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2464 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
2465 allow_sniffer_and_nic_rx_shared_tir))
2466 return ERR_PTR(-ENOTSUPP);
2467
2468 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
2469 MLX5_FLOW_NAMESPACE_SNIFFER_RX :
2470 MLX5_FLOW_NAMESPACE_SNIFFER_TX);
2471
2472 prio = &dev->flow_db.sniffer[ft_type];
2473 priority = 0;
2474 num_entries = 1;
2475 num_groups = 1;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002476 }
2477
2478 if (!ns)
2479 return ERR_PTR(-ENOTSUPP);
2480
Maor Gottliebdac388e2017-03-29 06:09:00 +03002481 if (num_entries > max_table_size)
2482 return ERR_PTR(-ENOMEM);
2483
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002484 ft = prio->flow_table;
2485 if (!ft) {
2486 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
2487 num_entries,
Maor Gottliebd63cd282016-04-29 01:36:35 +03002488 num_groups,
Hadar Hen Zionc9f1b072016-11-07 15:14:44 +02002489 0, 0);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002490
2491 if (!IS_ERR(ft)) {
2492 prio->refcount = 0;
2493 prio->flow_table = ft;
2494 } else {
2495 err = PTR_ERR(ft);
2496 }
2497 }
2498
2499 return err ? ERR_PTR(err) : prio;
2500}
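/*
 * Editorial note on get_flow_table(): NORMAL rules land in the BYPASS
 * namespace at a priority derived from ib_prio_to_core_prio(), leftover
 * (ALL_DEFAULT/MC_DEFAULT) rules go to the LEFTOVERS namespace, and
 * SNIFFER rules get a single-entry RX or TX table. Flow tables are
 * created lazily as auto-grouped tables and tracked through
 * prio->refcount.
 */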
2501
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002502static void set_underlay_qp(struct mlx5_ib_dev *dev,
2503 struct mlx5_flow_spec *spec,
2504 u32 underlay_qpn)
2505{
2506 void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
2507 spec->match_criteria,
2508 misc_parameters);
2509 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2510 misc_parameters);
2511
2512 if (underlay_qpn &&
2513 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2514 ft_field_support.bth_dst_qp)) {
2515 MLX5_SET(fte_match_set_misc,
2516 misc_params_v, bth_dst_qp, underlay_qpn);
2517 MLX5_SET(fte_match_set_misc,
2518 misc_params_c, bth_dst_qp, 0xffffff);
2519 }
2520}
2521
2522static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
2523 struct mlx5_ib_flow_prio *ft_prio,
2524 const struct ib_flow_attr *flow_attr,
2525 struct mlx5_flow_destination *dst,
2526 u32 underlay_qpn)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002527{
2528 struct mlx5_flow_table *ft = ft_prio->flow_table;
2529 struct mlx5_ib_flow_handler *handler;
Hadar Hen Zion66958ed2016-11-07 15:14:45 +02002530 struct mlx5_flow_act flow_act = {0};
Maor Gottliebc5bb1732016-07-04 17:23:05 +03002531 struct mlx5_flow_spec *spec;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002532 struct mlx5_flow_destination *rule_dst = dst;
Maor Gottliebdd063d02016-08-28 14:16:32 +03002533 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002534 unsigned int spec_index;
Moses Reuben2ac693f2017-01-18 14:59:50 +02002535 u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002536 bool is_drop = false;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002537 int err = 0;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002538 int dest_num = 1;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002539
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002540 if (!is_valid_attr(dev->mdev, flow_attr))
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002541 return ERR_PTR(-EINVAL);
2542
Leon Romanovsky1b9a07e2017-05-10 21:32:18 +03002543 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002544 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
Maor Gottliebc5bb1732016-07-04 17:23:05 +03002545 if (!handler || !spec) {
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002546 err = -ENOMEM;
2547 goto free;
2548 }
2549
2550 INIT_LIST_HEAD(&handler->list);
2551
2552 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002553 err = parse_flow_attr(dev->mdev, spec->match_criteria,
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002554 spec->match_value,
2555 ib_flow, &flow_tag, &is_drop);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002556 if (err < 0)
2557 goto free;
2558
2559 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
2560 }
2561
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002562 if (!flow_is_multicast_only(flow_attr))
2563 set_underlay_qp(dev, spec, underlay_qpn);
2564
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002565 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002566 if (is_drop) {
2567 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
2568 rule_dst = NULL;
2569 dest_num = 0;
2570 } else {
2571 flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
2572 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
2573 }
Moses Reuben2ac693f2017-01-18 14:59:50 +02002574
2575 if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
2576 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2577 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
2578 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
2579 flow_tag, flow_attr->type);
2580 err = -EINVAL;
2581 goto free;
2582 }
2583 flow_act.flow_tag = flow_tag;
Mark Bloch74491de2016-08-31 11:24:25 +00002584 handler->rule = mlx5_add_flow_rules(ft, spec,
Hadar Hen Zion66958ed2016-11-07 15:14:45 +02002585 &flow_act,
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002586 rule_dst, dest_num);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002587
2588 if (IS_ERR(handler->rule)) {
2589 err = PTR_ERR(handler->rule);
2590 goto free;
2591 }
2592
Maor Gottliebd9d49802016-08-28 14:16:33 +03002593 ft_prio->refcount++;
Maor Gottlieb5497adc2016-08-28 14:16:31 +03002594 handler->prio = ft_prio;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002595
2596 ft_prio->flow_table = ft;
2597free:
2598 if (err)
2599 kfree(handler);
Maor Gottliebc5bb1732016-07-04 17:23:05 +03002600 kvfree(spec);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002601 return err ? ERR_PTR(err) : handler;
2602}
2603
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002604static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
2605 struct mlx5_ib_flow_prio *ft_prio,
2606 const struct ib_flow_attr *flow_attr,
2607 struct mlx5_flow_destination *dst)
2608{
2609 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
2610}
2611
Maor Gottlieb35d190112016-03-07 18:51:47 +02002612static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
2613 struct mlx5_ib_flow_prio *ft_prio,
2614 struct ib_flow_attr *flow_attr,
2615 struct mlx5_flow_destination *dst)
2616{
2617 struct mlx5_ib_flow_handler *handler_dst = NULL;
2618 struct mlx5_ib_flow_handler *handler = NULL;
2619
2620 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
2621 if (!IS_ERR(handler)) {
2622 handler_dst = create_flow_rule(dev, ft_prio,
2623 flow_attr, dst);
2624 if (IS_ERR(handler_dst)) {
Mark Bloch74491de2016-08-31 11:24:25 +00002625 mlx5_del_flow_rules(handler->rule);
Maor Gottliebd9d49802016-08-28 14:16:33 +03002626 ft_prio->refcount--;
Maor Gottlieb35d190112016-03-07 18:51:47 +02002627 kfree(handler);
2628 handler = handler_dst;
2629 } else {
2630 list_add(&handler_dst->list, &handler->list);
2631 }
2632 }
2633
2634 return handler;
2635}
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002636enum {
2637 LEFTOVERS_MC,
2638 LEFTOVERS_UC,
2639};
2640
2641static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
2642 struct mlx5_ib_flow_prio *ft_prio,
2643 struct ib_flow_attr *flow_attr,
2644 struct mlx5_flow_destination *dst)
2645{
2646 struct mlx5_ib_flow_handler *handler_ucast = NULL;
2647 struct mlx5_ib_flow_handler *handler = NULL;
2648
2649 static struct {
2650 struct ib_flow_attr flow_attr;
2651 struct ib_flow_spec_eth eth_flow;
2652 } leftovers_specs[] = {
2653 [LEFTOVERS_MC] = {
2654 .flow_attr = {
2655 .num_of_specs = 1,
2656 .size = sizeof(leftovers_specs[0])
2657 },
2658 .eth_flow = {
2659 .type = IB_FLOW_SPEC_ETH,
2660 .size = sizeof(struct ib_flow_spec_eth),
2661 .mask = {.dst_mac = {0x1} },
2662 .val = {.dst_mac = {0x1} }
2663 }
2664 },
2665 [LEFTOVERS_UC] = {
2666 .flow_attr = {
2667 .num_of_specs = 1,
2668 .size = sizeof(leftovers_specs[0])
2669 },
2670 .eth_flow = {
2671 .type = IB_FLOW_SPEC_ETH,
2672 .size = sizeof(struct ib_flow_spec_eth),
2673 .mask = {.dst_mac = {0x1} },
2674 .val = {.dst_mac = {} }
2675 }
2676 }
2677 };
2678
2679 handler = create_flow_rule(dev, ft_prio,
2680 &leftovers_specs[LEFTOVERS_MC].flow_attr,
2681 dst);
2682 if (!IS_ERR(handler) &&
2683 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
2684 handler_ucast = create_flow_rule(dev, ft_prio,
2685 &leftovers_specs[LEFTOVERS_UC].flow_attr,
2686 dst);
2687 if (IS_ERR(handler_ucast)) {
Mark Bloch74491de2016-08-31 11:24:25 +00002688 mlx5_del_flow_rules(handler->rule);
Maor Gottliebd9d49802016-08-28 14:16:33 +03002689 ft_prio->refcount--;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002690 kfree(handler);
2691 handler = handler_ucast;
2692 } else {
2693 list_add(&handler_ucast->list, &handler->list);
2694 }
2695 }
2696
2697 return handler;
2698}
2699
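/*
 * Sniffer rules use an empty flow spec (num_of_specs == 0) so they match all
 * traffic. One rule is installed on the RX table and one on the TX table;
 * the TX handler is chained to the RX handler so both are removed together.
 */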
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002700static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
2701 struct mlx5_ib_flow_prio *ft_rx,
2702 struct mlx5_ib_flow_prio *ft_tx,
2703 struct mlx5_flow_destination *dst)
2704{
2705 struct mlx5_ib_flow_handler *handler_rx;
2706 struct mlx5_ib_flow_handler *handler_tx;
2707 int err;
2708 static const struct ib_flow_attr flow_attr = {
2709 .num_of_specs = 0,
2710 .size = sizeof(flow_attr)
2711 };
2712
2713 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
2714 if (IS_ERR(handler_rx)) {
2715 err = PTR_ERR(handler_rx);
2716 goto err;
2717 }
2718
2719 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
2720 if (IS_ERR(handler_tx)) {
2721 err = PTR_ERR(handler_tx);
2722 goto err_tx;
2723 }
2724
2725 list_add(&handler_tx->list, &handler_rx->list);
2726
2727 return handler_rx;
2728
2729err_tx:
Mark Bloch74491de2016-08-31 11:24:25 +00002730 mlx5_del_flow_rules(handler_rx->rule);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002731 ft_rx->refcount--;
2732 kfree(handler_rx);
2733err:
2734 return ERR_PTR(err);
2735}
2736
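/*
 * Verbs entry point for flow steering. Resolves the flow table priority for
 * the requested attributes, points the destination at the QP's TIR (the RSS
 * TIR or the receive queue TIR), and dispatches to the NORMAL, leftovers or
 * sniffer rule builders under flow_db.lock.
 */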
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002737static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
2738 struct ib_flow_attr *flow_attr,
2739 int domain)
2740{
2741 struct mlx5_ib_dev *dev = to_mdev(qp->device);
Yishai Hadasd9f88e52016-08-28 10:58:37 +03002742 struct mlx5_ib_qp *mqp = to_mqp(qp);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002743 struct mlx5_ib_flow_handler *handler = NULL;
2744 struct mlx5_flow_destination *dst = NULL;
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002745 struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002746 struct mlx5_ib_flow_prio *ft_prio;
2747 int err;
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002748 int underlay_qpn;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002749
2750 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
Maor Gottliebdac388e2017-03-29 06:09:00 +03002751 return ERR_PTR(-ENOMEM);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002752
2753 if (domain != IB_FLOW_DOMAIN_USER ||
2754 flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
Maor Gottlieb35d190112016-03-07 18:51:47 +02002755 (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002756 return ERR_PTR(-EINVAL);
2757
2758 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
2759 if (!dst)
2760 return ERR_PTR(-ENOMEM);
2761
2762 mutex_lock(&dev->flow_db.lock);
2763
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002764 ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002765 if (IS_ERR(ft_prio)) {
2766 err = PTR_ERR(ft_prio);
2767 goto unlock;
2768 }
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002769 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2770 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
2771 if (IS_ERR(ft_prio_tx)) {
2772 err = PTR_ERR(ft_prio_tx);
2773 ft_prio_tx = NULL;
2774 goto destroy_ft;
2775 }
2776 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002777
2778 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
Yishai Hadasd9f88e52016-08-28 10:58:37 +03002779 if (mqp->flags & MLX5_IB_QP_RSS)
2780 dst->tir_num = mqp->rss_qp.tirn;
2781 else
2782 dst->tir_num = mqp->raw_packet_qp.rq.tirn;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002783
2784 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Maor Gottlieb35d190112016-03-07 18:51:47 +02002785 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
2786 handler = create_dont_trap_rule(dev, ft_prio,
2787 flow_attr, dst);
2788 } else {
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002789 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
2790 mqp->underlay_qpn : 0;
2791 handler = _create_flow_rule(dev, ft_prio, flow_attr,
2792 dst, underlay_qpn);
Maor Gottlieb35d190112016-03-07 18:51:47 +02002793 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002794 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2795 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2796 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
2797 dst);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002798 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2799 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002800 } else {
2801 err = -EINVAL;
2802 goto destroy_ft;
2803 }
2804
2805 if (IS_ERR(handler)) {
2806 err = PTR_ERR(handler);
2807 handler = NULL;
2808 goto destroy_ft;
2809 }
2810
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002811 mutex_unlock(&dev->flow_db.lock);
2812 kfree(dst);
2813
2814 return &handler->ibflow;
2815
2816destroy_ft:
2817 put_flow_table(dev, ft_prio, false);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002818 if (ft_prio_tx)
2819 put_flow_table(dev, ft_prio_tx, false);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002820unlock:
2821 mutex_unlock(&dev->flow_db.lock);
2822 kfree(dst);
2823 kfree(handler);
2824 return ERR_PTR(err);
2825}
2826
Eli Cohene126ba92013-07-07 17:25:49 +03002827static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2828{
2829 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
Yishai Hadas81e30882017-06-08 16:15:09 +03002830 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
Eli Cohene126ba92013-07-07 17:25:49 +03002831 int err;
2832
Yishai Hadas81e30882017-06-08 16:15:09 +03002833 if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
 2834		mlx5_ib_dbg(dev, "Attaching a multicast group to underlay QP is not supported\n");
2835 return -EOPNOTSUPP;
2836 }
2837
Jack Morgenstein9603b612014-07-28 23:30:22 +03002838 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
Eli Cohene126ba92013-07-07 17:25:49 +03002839 if (err)
2840 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2841 ibqp->qp_num, gid->raw);
2842
2843 return err;
2844}
2845
2846static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2847{
2848 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2849 int err;
2850
Jack Morgenstein9603b612014-07-28 23:30:22 +03002851 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
Eli Cohene126ba92013-07-07 17:25:49 +03002852 if (err)
2853 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2854 ibqp->qp_num, gid->raw);
2855
2856 return err;
2857}
2858
2859static int init_node_data(struct mlx5_ib_dev *dev)
2860{
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002861 int err;
Eli Cohene126ba92013-07-07 17:25:49 +03002862
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002863 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
Eli Cohene126ba92013-07-07 17:25:49 +03002864 if (err)
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002865 return err;
Eli Cohene126ba92013-07-07 17:25:49 +03002866
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002867 dev->mdev->rev_id = dev->mdev->pdev->revision;
Eli Cohene126ba92013-07-07 17:25:49 +03002868
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002869 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
Eli Cohene126ba92013-07-07 17:25:49 +03002870}
2871
2872static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
2873 char *buf)
2874{
2875 struct mlx5_ib_dev *dev =
2876 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2877
Jack Morgenstein9603b612014-07-28 23:30:22 +03002878 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
Eli Cohene126ba92013-07-07 17:25:49 +03002879}
2880
2881static ssize_t show_reg_pages(struct device *device,
2882 struct device_attribute *attr, char *buf)
2883{
2884 struct mlx5_ib_dev *dev =
2885 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2886
Haggai Eran6aec21f2014-12-11 17:04:23 +02002887 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
Eli Cohene126ba92013-07-07 17:25:49 +03002888}
2889
2890static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2891 char *buf)
2892{
2893 struct mlx5_ib_dev *dev =
2894 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
Jack Morgenstein9603b612014-07-28 23:30:22 +03002895 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
Eli Cohene126ba92013-07-07 17:25:49 +03002896}
2897
Eli Cohene126ba92013-07-07 17:25:49 +03002898static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2899 char *buf)
2900{
2901 struct mlx5_ib_dev *dev =
2902 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
Jack Morgenstein9603b612014-07-28 23:30:22 +03002903 return sprintf(buf, "%x\n", dev->mdev->rev_id);
Eli Cohene126ba92013-07-07 17:25:49 +03002904}
2905
2906static ssize_t show_board(struct device *device, struct device_attribute *attr,
2907 char *buf)
2908{
2909 struct mlx5_ib_dev *dev =
2910 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2911 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
Jack Morgenstein9603b612014-07-28 23:30:22 +03002912 dev->mdev->board_id);
Eli Cohene126ba92013-07-07 17:25:49 +03002913}
2914
2915static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
Eli Cohene126ba92013-07-07 17:25:49 +03002916static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2917static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
2918static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
2919static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
2920
2921static struct device_attribute *mlx5_class_attributes[] = {
2922 &dev_attr_hw_rev,
Eli Cohene126ba92013-07-07 17:25:49 +03002923 &dev_attr_hca_type,
2924 &dev_attr_board_id,
2925 &dev_attr_fw_pages,
2926 &dev_attr_reg_pages,
2927};
2928
Haggai Eran7722f472016-02-29 15:45:07 +02002929static void pkey_change_handler(struct work_struct *work)
2930{
2931 struct mlx5_ib_port_resources *ports =
2932 container_of(work, struct mlx5_ib_port_resources,
2933 pkey_change_work);
2934
2935 mutex_lock(&ports->devr->mutex);
2936 mlx5_ib_gsi_pkey_change(ports->gsi);
2937 mutex_unlock(&ports->devr->mutex);
2938}
2939
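/*
 * On a fatal device error no further completions will be generated by
 * hardware, so walk every QP on this ibdev and, for any send or receive
 * queue with outstanding work, invoke its CQ's completion handler so
 * consumers can poll the flushed errors and release their resources.
 */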
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03002940static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2941{
2942 struct mlx5_ib_qp *mqp;
2943 struct mlx5_ib_cq *send_mcq, *recv_mcq;
2944 struct mlx5_core_cq *mcq;
2945 struct list_head cq_armed_list;
2946 unsigned long flags_qp;
2947 unsigned long flags_cq;
2948 unsigned long flags;
2949
2950 INIT_LIST_HEAD(&cq_armed_list);
2951
 2952	/* Go over the QP list residing on this ibdev, syncing with create/destroy QP. */
2953 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2954 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2955 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2956 if (mqp->sq.tail != mqp->sq.head) {
2957 send_mcq = to_mcq(mqp->ibqp.send_cq);
2958 spin_lock_irqsave(&send_mcq->lock, flags_cq);
2959 if (send_mcq->mcq.comp &&
2960 mqp->ibqp.send_cq->comp_handler) {
2961 if (!send_mcq->mcq.reset_notify_added) {
2962 send_mcq->mcq.reset_notify_added = 1;
2963 list_add_tail(&send_mcq->mcq.reset_notify,
2964 &cq_armed_list);
2965 }
2966 }
2967 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2968 }
2969 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2970 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2971 /* no handling is needed for SRQ */
2972 if (!mqp->ibqp.srq) {
2973 if (mqp->rq.tail != mqp->rq.head) {
2974 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2975 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2976 if (recv_mcq->mcq.comp &&
2977 mqp->ibqp.recv_cq->comp_handler) {
2978 if (!recv_mcq->mcq.reset_notify_added) {
2979 recv_mcq->mcq.reset_notify_added = 1;
2980 list_add_tail(&recv_mcq->mcq.reset_notify,
2981 &cq_armed_list);
2982 }
2983 }
2984 spin_unlock_irqrestore(&recv_mcq->lock,
2985 flags_cq);
2986 }
2987 }
2988 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2989 }
 2990	/* At this point all inflight post-send operations have been drained by the
 2991	 * lock/unlock of the locks above. Now arm all involved CQs.
 2992	 */
2993 list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2994 mcq->comp(mcq);
2995 }
2996 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2997}
2998
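/*
 * Worker for the delay-drop timeout event: re-arms the delay-drop timeout in
 * firmware with the currently configured value and counts the event for the
 * debugfs statistics.
 */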
Maor Gottlieb03404e82017-05-30 10:29:13 +03002999static void delay_drop_handler(struct work_struct *work)
3000{
3001 int err;
3002 struct mlx5_ib_delay_drop *delay_drop =
3003 container_of(work, struct mlx5_ib_delay_drop,
3004 delay_drop_work);
3005
Maor Gottliebfe248c32017-05-30 10:29:14 +03003006 atomic_inc(&delay_drop->events_cnt);
3007
Maor Gottlieb03404e82017-05-30 10:29:13 +03003008 mutex_lock(&delay_drop->lock);
3009 err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
3010 delay_drop->timeout);
3011 if (err) {
3012 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
3013 delay_drop->timeout);
3014 delay_drop->activate = false;
3015 }
3016 mutex_unlock(&delay_drop->lock);
3017}
3018
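/*
 * Core event handler registered with mlx5_core: translates low-level device
 * events (fatal errors, port/LID/P_Key/GUID changes, client re-register,
 * delay-drop timeouts) into IB events and dispatches them while the device
 * is active. RoCE port up/down is handled separately by the netdev notifier.
 */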
Jack Morgenstein9603b612014-07-28 23:30:22 +03003019static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
Jack Morgenstein4d2f9bb2014-07-28 23:30:24 +03003020 enum mlx5_dev_event event, unsigned long param)
Eli Cohene126ba92013-07-07 17:25:49 +03003021{
Jack Morgenstein9603b612014-07-28 23:30:22 +03003022 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
Eli Cohene126ba92013-07-07 17:25:49 +03003023 struct ib_event ibev;
Eli Cohendbaaff22016-10-27 16:36:44 +03003024 bool fatal = false;
Eli Cohene126ba92013-07-07 17:25:49 +03003025 u8 port = 0;
3026
3027 switch (event) {
3028 case MLX5_DEV_EVENT_SYS_ERROR:
Eli Cohene126ba92013-07-07 17:25:49 +03003029 ibev.event = IB_EVENT_DEVICE_FATAL;
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03003030 mlx5_ib_handle_internal_error(ibdev);
Eli Cohendbaaff22016-10-27 16:36:44 +03003031 fatal = true;
Eli Cohene126ba92013-07-07 17:25:49 +03003032 break;
3033
3034 case MLX5_DEV_EVENT_PORT_UP:
Eli Cohene126ba92013-07-07 17:25:49 +03003035 case MLX5_DEV_EVENT_PORT_DOWN:
Noa Osherovich2788cf32016-06-04 15:15:29 +03003036 case MLX5_DEV_EVENT_PORT_INITIALIZED:
Jack Morgenstein4d2f9bb2014-07-28 23:30:24 +03003037 port = (u8)param;
Aviv Heller5ec8c832016-09-18 20:48:00 +03003038
3039 /* In RoCE, port up/down events are handled in
3040 * mlx5_netdev_event().
3041 */
3042 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3043 IB_LINK_LAYER_ETHERNET)
3044 return;
3045
3046 ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
3047 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
Eli Cohene126ba92013-07-07 17:25:49 +03003048 break;
3049
Eli Cohene126ba92013-07-07 17:25:49 +03003050 case MLX5_DEV_EVENT_LID_CHANGE:
3051 ibev.event = IB_EVENT_LID_CHANGE;
Jack Morgenstein4d2f9bb2014-07-28 23:30:24 +03003052 port = (u8)param;
Eli Cohene126ba92013-07-07 17:25:49 +03003053 break;
3054
3055 case MLX5_DEV_EVENT_PKEY_CHANGE:
3056 ibev.event = IB_EVENT_PKEY_CHANGE;
Jack Morgenstein4d2f9bb2014-07-28 23:30:24 +03003057 port = (u8)param;
Haggai Eran7722f472016-02-29 15:45:07 +02003058
3059 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
Eli Cohene126ba92013-07-07 17:25:49 +03003060 break;
3061
3062 case MLX5_DEV_EVENT_GUID_CHANGE:
3063 ibev.event = IB_EVENT_GID_CHANGE;
Jack Morgenstein4d2f9bb2014-07-28 23:30:24 +03003064 port = (u8)param;
Eli Cohene126ba92013-07-07 17:25:49 +03003065 break;
3066
3067 case MLX5_DEV_EVENT_CLIENT_REREG:
3068 ibev.event = IB_EVENT_CLIENT_REREGISTER;
Jack Morgenstein4d2f9bb2014-07-28 23:30:24 +03003069 port = (u8)param;
Eli Cohene126ba92013-07-07 17:25:49 +03003070 break;
Maor Gottlieb03404e82017-05-30 10:29:13 +03003071 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
3072 schedule_work(&ibdev->delay_drop.delay_drop_work);
3073 goto out;
Saeed Mahameedbdc37922016-09-29 19:35:38 +03003074 default:
Maor Gottlieb03404e82017-05-30 10:29:13 +03003075 goto out;
Eli Cohene126ba92013-07-07 17:25:49 +03003076 }
3077
3078 ibev.device = &ibdev->ib_dev;
3079 ibev.element.port_num = port;
3080
Eli Cohena0c84c32013-09-11 16:35:27 +03003081 if (port < 1 || port > ibdev->num_ports) {
3082 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
Maor Gottlieb03404e82017-05-30 10:29:13 +03003083 goto out;
Eli Cohena0c84c32013-09-11 16:35:27 +03003084 }
3085
Eli Cohene126ba92013-07-07 17:25:49 +03003086 if (ibdev->ib_active)
3087 ib_dispatch_event(&ibev);
Eli Cohendbaaff22016-10-27 16:36:44 +03003088
3089 if (fatal)
3090 ibdev->ib_active = false;
Maor Gottlieb03404e82017-05-30 10:29:13 +03003091
3092out:
3093 return;
Eli Cohene126ba92013-07-07 17:25:49 +03003094}
3095
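/*
 * An IB port exposes an SMI only when the device supports it: with IB
 * virtualization the capability is taken from the vport context, otherwise
 * it is assumed present on IB-type ports.
 */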
Maor Gottliebc43f1112017-01-18 14:10:33 +02003096static int set_has_smi_cap(struct mlx5_ib_dev *dev)
3097{
3098 struct mlx5_hca_vport_context vport_ctx;
3099 int err;
3100 int port;
3101
3102 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
3103 dev->mdev->port_caps[port - 1].has_smi = false;
3104 if (MLX5_CAP_GEN(dev->mdev, port_type) ==
3105 MLX5_CAP_PORT_TYPE_IB) {
3106 if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
3107 err = mlx5_query_hca_vport_context(dev->mdev, 0,
3108 port, 0,
3109 &vport_ctx);
3110 if (err) {
3111 mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
3112 port, err);
3113 return err;
3114 }
3115 dev->mdev->port_caps[port - 1].has_smi =
3116 vport_ctx.has_smi;
3117 } else {
3118 dev->mdev->port_caps[port - 1].has_smi = true;
3119 }
3120 }
3121 }
3122 return 0;
3123}
3124
Eli Cohene126ba92013-07-07 17:25:49 +03003125static void get_ext_port_caps(struct mlx5_ib_dev *dev)
3126{
3127 int port;
3128
Saeed Mahameed938fe832015-05-28 22:28:41 +03003129 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
Eli Cohene126ba92013-07-07 17:25:49 +03003130 mlx5_query_ext_port_caps(dev, port);
3131}
3132
3133static int get_port_caps(struct mlx5_ib_dev *dev)
3134{
3135 struct ib_device_attr *dprops = NULL;
3136 struct ib_port_attr *pprops = NULL;
Dan Carpenterf614fc12015-01-12 11:56:58 +03003137 int err = -ENOMEM;
Eli Cohene126ba92013-07-07 17:25:49 +03003138 int port;
Matan Barak2528e332015-06-11 16:35:25 +03003139 struct ib_udata uhw = {.inlen = 0, .outlen = 0};
Eli Cohene126ba92013-07-07 17:25:49 +03003140
3141 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
3142 if (!pprops)
3143 goto out;
3144
3145 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
3146 if (!dprops)
3147 goto out;
3148
Maor Gottliebc43f1112017-01-18 14:10:33 +02003149 err = set_has_smi_cap(dev);
3150 if (err)
3151 goto out;
3152
Matan Barak2528e332015-06-11 16:35:25 +03003153 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
Eli Cohene126ba92013-07-07 17:25:49 +03003154 if (err) {
3155 mlx5_ib_warn(dev, "query_device failed %d\n", err);
3156 goto out;
3157 }
3158
Saeed Mahameed938fe832015-05-28 22:28:41 +03003159 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
Or Gerlitzc4550c62017-01-24 13:02:39 +02003160 memset(pprops, 0, sizeof(*pprops));
Eli Cohene126ba92013-07-07 17:25:49 +03003161 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
3162 if (err) {
Saeed Mahameed938fe832015-05-28 22:28:41 +03003163 mlx5_ib_warn(dev, "query_port %d failed %d\n",
3164 port, err);
Eli Cohene126ba92013-07-07 17:25:49 +03003165 break;
3166 }
Saeed Mahameed938fe832015-05-28 22:28:41 +03003167 dev->mdev->port_caps[port - 1].pkey_table_len =
3168 dprops->max_pkeys;
3169 dev->mdev->port_caps[port - 1].gid_table_len =
3170 pprops->gid_tbl_len;
Eli Cohene126ba92013-07-07 17:25:49 +03003171 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
3172 dprops->max_pkeys, pprops->gid_tbl_len);
3173 }
3174
3175out:
3176 kfree(pprops);
3177 kfree(dprops);
3178
3179 return err;
3180}
3181
3182static void destroy_umrc_res(struct mlx5_ib_dev *dev)
3183{
3184 int err;
3185
3186 err = mlx5_mr_cache_cleanup(dev);
3187 if (err)
3188 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
3189
3190 mlx5_ib_destroy_qp(dev->umrc.qp);
Christoph Hellwigadd08d72016-03-03 09:38:22 +01003191 ib_free_cq(dev->umrc.cq);
Eli Cohene126ba92013-07-07 17:25:49 +03003192 ib_dealloc_pd(dev->umrc.pd);
3193}
3194
3195enum {
3196 MAX_UMR_WR = 128,
3197};
3198
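/*
 * The UMR (user memory registration) path needs a dedicated kernel QP.
 * Build the PD, CQ and an MLX5_IB_QPT_REG_UMR QP, drive the QP through
 * INIT/RTR/RTS by hand, and bound outstanding UMR work requests with a
 * semaphore sized to MAX_UMR_WR before initializing the MR cache.
 */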
3199static int create_umr_res(struct mlx5_ib_dev *dev)
3200{
3201 struct ib_qp_init_attr *init_attr = NULL;
3202 struct ib_qp_attr *attr = NULL;
3203 struct ib_pd *pd;
3204 struct ib_cq *cq;
3205 struct ib_qp *qp;
Eli Cohene126ba92013-07-07 17:25:49 +03003206 int ret;
3207
3208 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
3209 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
3210 if (!attr || !init_attr) {
3211 ret = -ENOMEM;
3212 goto error_0;
3213 }
3214
Christoph Hellwiged082d32016-09-05 12:56:17 +02003215 pd = ib_alloc_pd(&dev->ib_dev, 0);
Eli Cohene126ba92013-07-07 17:25:49 +03003216 if (IS_ERR(pd)) {
3217 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
3218 ret = PTR_ERR(pd);
3219 goto error_0;
3220 }
3221
Christoph Hellwigadd08d72016-03-03 09:38:22 +01003222 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
Eli Cohene126ba92013-07-07 17:25:49 +03003223 if (IS_ERR(cq)) {
3224 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
3225 ret = PTR_ERR(cq);
3226 goto error_2;
3227 }
Eli Cohene126ba92013-07-07 17:25:49 +03003228
3229 init_attr->send_cq = cq;
3230 init_attr->recv_cq = cq;
3231 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
3232 init_attr->cap.max_send_wr = MAX_UMR_WR;
3233 init_attr->cap.max_send_sge = 1;
3234 init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
3235 init_attr->port_num = 1;
3236 qp = mlx5_ib_create_qp(pd, init_attr, NULL);
3237 if (IS_ERR(qp)) {
3238 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
3239 ret = PTR_ERR(qp);
3240 goto error_3;
3241 }
3242 qp->device = &dev->ib_dev;
3243 qp->real_qp = qp;
3244 qp->uobject = NULL;
3245 qp->qp_type = MLX5_IB_QPT_REG_UMR;
Majd Dibbiny31fde032017-10-30 14:23:13 +02003246 qp->send_cq = init_attr->send_cq;
3247 qp->recv_cq = init_attr->recv_cq;
Eli Cohene126ba92013-07-07 17:25:49 +03003248
3249 attr->qp_state = IB_QPS_INIT;
3250 attr->port_num = 1;
3251 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
3252 IB_QP_PORT, NULL);
3253 if (ret) {
3254 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
3255 goto error_4;
3256 }
3257
3258 memset(attr, 0, sizeof(*attr));
3259 attr->qp_state = IB_QPS_RTR;
3260 attr->path_mtu = IB_MTU_256;
3261
3262 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
3263 if (ret) {
3264 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
3265 goto error_4;
3266 }
3267
3268 memset(attr, 0, sizeof(*attr));
3269 attr->qp_state = IB_QPS_RTS;
3270 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
3271 if (ret) {
3272 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
3273 goto error_4;
3274 }
3275
3276 dev->umrc.qp = qp;
3277 dev->umrc.cq = cq;
Eli Cohene126ba92013-07-07 17:25:49 +03003278 dev->umrc.pd = pd;
3279
3280 sema_init(&dev->umrc.sem, MAX_UMR_WR);
3281 ret = mlx5_mr_cache_init(dev);
3282 if (ret) {
3283 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
3284 goto error_4;
3285 }
3286
3287 kfree(attr);
3288 kfree(init_attr);
3289
3290 return 0;
3291
3292error_4:
3293 mlx5_ib_destroy_qp(qp);
3294
3295error_3:
Christoph Hellwigadd08d72016-03-03 09:38:22 +01003296 ib_free_cq(cq);
Eli Cohene126ba92013-07-07 17:25:49 +03003297
3298error_2:
Eli Cohene126ba92013-07-07 17:25:49 +03003299 ib_dealloc_pd(pd);
3300
3301error_0:
3302 kfree(attr);
3303 kfree(init_attr);
3304 return ret;
3305}
3306
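/*
 * Map the reported UMR fence capability to the weakest fence mode the device
 * allows for UMR operations; unknown values fall back to strong ordering.
 */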
Max Gurtovoy6e8484c2017-05-28 10:53:11 +03003307static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
3308{
3309 switch (umr_fence_cap) {
3310 case MLX5_CAP_UMR_FENCE_NONE:
3311 return MLX5_FENCE_MODE_NONE;
3312 case MLX5_CAP_UMR_FENCE_SMALL:
3313 return MLX5_FENCE_MODE_INITIATOR_SMALL;
3314 default:
3315 return MLX5_FENCE_MODE_STRONG_ORDERING;
3316 }
3317}
3318
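/*
 * Allocate the verbs resources used internally by the driver: PD p0, CQ c0,
 * two XRC domains, an XRC SRQ (s0) and a basic SRQ (s1), plus the per-port
 * P_Key change work items. The objects are wired up manually since they are
 * created without a user context.
 */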
Eli Cohene126ba92013-07-07 17:25:49 +03003319static int create_dev_resources(struct mlx5_ib_resources *devr)
3320{
3321 struct ib_srq_init_attr attr;
3322 struct mlx5_ib_dev *dev;
Matan Barakbcf4c1e2015-06-11 16:35:20 +03003323 struct ib_cq_init_attr cq_attr = {.cqe = 1};
Haggai Eran7722f472016-02-29 15:45:07 +02003324 int port;
Eli Cohene126ba92013-07-07 17:25:49 +03003325 int ret = 0;
3326
3327 dev = container_of(devr, struct mlx5_ib_dev, devr);
3328
Haggai Erand16e91d2016-02-29 15:45:05 +02003329 mutex_init(&devr->mutex);
3330
Eli Cohene126ba92013-07-07 17:25:49 +03003331 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
3332 if (IS_ERR(devr->p0)) {
3333 ret = PTR_ERR(devr->p0);
3334 goto error0;
3335 }
3336 devr->p0->device = &dev->ib_dev;
3337 devr->p0->uobject = NULL;
3338 atomic_set(&devr->p0->usecnt, 0);
3339
Matan Barakbcf4c1e2015-06-11 16:35:20 +03003340 devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
Eli Cohene126ba92013-07-07 17:25:49 +03003341 if (IS_ERR(devr->c0)) {
3342 ret = PTR_ERR(devr->c0);
3343 goto error1;
3344 }
3345 devr->c0->device = &dev->ib_dev;
3346 devr->c0->uobject = NULL;
3347 devr->c0->comp_handler = NULL;
3348 devr->c0->event_handler = NULL;
3349 devr->c0->cq_context = NULL;
3350 atomic_set(&devr->c0->usecnt, 0);
3351
3352 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
3353 if (IS_ERR(devr->x0)) {
3354 ret = PTR_ERR(devr->x0);
3355 goto error2;
3356 }
3357 devr->x0->device = &dev->ib_dev;
3358 devr->x0->inode = NULL;
3359 atomic_set(&devr->x0->usecnt, 0);
3360 mutex_init(&devr->x0->tgt_qp_mutex);
3361 INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
3362
3363 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
3364 if (IS_ERR(devr->x1)) {
3365 ret = PTR_ERR(devr->x1);
3366 goto error3;
3367 }
3368 devr->x1->device = &dev->ib_dev;
3369 devr->x1->inode = NULL;
3370 atomic_set(&devr->x1->usecnt, 0);
3371 mutex_init(&devr->x1->tgt_qp_mutex);
3372 INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
3373
3374 memset(&attr, 0, sizeof(attr));
3375 attr.attr.max_sge = 1;
3376 attr.attr.max_wr = 1;
3377 attr.srq_type = IB_SRQT_XRC;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03003378 attr.ext.cq = devr->c0;
Eli Cohene126ba92013-07-07 17:25:49 +03003379 attr.ext.xrc.xrcd = devr->x0;
3380
3381 devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
3382 if (IS_ERR(devr->s0)) {
3383 ret = PTR_ERR(devr->s0);
3384 goto error4;
3385 }
3386 devr->s0->device = &dev->ib_dev;
3387 devr->s0->pd = devr->p0;
3388 devr->s0->uobject = NULL;
3389 devr->s0->event_handler = NULL;
3390 devr->s0->srq_context = NULL;
3391 devr->s0->srq_type = IB_SRQT_XRC;
3392 devr->s0->ext.xrc.xrcd = devr->x0;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03003393 devr->s0->ext.cq = devr->c0;
Eli Cohene126ba92013-07-07 17:25:49 +03003394 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03003395 atomic_inc(&devr->s0->ext.cq->usecnt);
Eli Cohene126ba92013-07-07 17:25:49 +03003396 atomic_inc(&devr->p0->usecnt);
3397 atomic_set(&devr->s0->usecnt, 0);
3398
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03003399 memset(&attr, 0, sizeof(attr));
3400 attr.attr.max_sge = 1;
3401 attr.attr.max_wr = 1;
3402 attr.srq_type = IB_SRQT_BASIC;
3403 devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
3404 if (IS_ERR(devr->s1)) {
3405 ret = PTR_ERR(devr->s1);
3406 goto error5;
3407 }
3408 devr->s1->device = &dev->ib_dev;
3409 devr->s1->pd = devr->p0;
3410 devr->s1->uobject = NULL;
3411 devr->s1->event_handler = NULL;
3412 devr->s1->srq_context = NULL;
3413 devr->s1->srq_type = IB_SRQT_BASIC;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03003414 devr->s1->ext.cq = devr->c0;
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03003415 atomic_inc(&devr->p0->usecnt);
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03003416 atomic_set(&devr->s1->usecnt, 0);
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03003417
Haggai Eran7722f472016-02-29 15:45:07 +02003418 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
3419 INIT_WORK(&devr->ports[port].pkey_change_work,
3420 pkey_change_handler);
3421 devr->ports[port].devr = devr;
3422 }
3423
Eli Cohene126ba92013-07-07 17:25:49 +03003424 return 0;
3425
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03003426error5:
3427 mlx5_ib_destroy_srq(devr->s0);
Eli Cohene126ba92013-07-07 17:25:49 +03003428error4:
3429 mlx5_ib_dealloc_xrcd(devr->x1);
3430error3:
3431 mlx5_ib_dealloc_xrcd(devr->x0);
3432error2:
3433 mlx5_ib_destroy_cq(devr->c0);
3434error1:
3435 mlx5_ib_dealloc_pd(devr->p0);
3436error0:
3437 return ret;
3438}
3439
3440static void destroy_dev_resources(struct mlx5_ib_resources *devr)
3441{
Haggai Eran7722f472016-02-29 15:45:07 +02003442 struct mlx5_ib_dev *dev =
3443 container_of(devr, struct mlx5_ib_dev, devr);
3444 int port;
3445
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03003446 mlx5_ib_destroy_srq(devr->s1);
Eli Cohene126ba92013-07-07 17:25:49 +03003447 mlx5_ib_destroy_srq(devr->s0);
3448 mlx5_ib_dealloc_xrcd(devr->x0);
3449 mlx5_ib_dealloc_xrcd(devr->x1);
3450 mlx5_ib_destroy_cq(devr->c0);
3451 mlx5_ib_dealloc_pd(devr->p0);
Haggai Eran7722f472016-02-29 15:45:07 +02003452
 3453	/* Make sure no P_Key change work items are still executing */
3454 for (port = 0; port < dev->num_ports; ++port)
3455 cancel_work_sync(&devr->ports[port].pkey_change_work);
Eli Cohene126ba92013-07-07 17:25:49 +03003456}
3457
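/*
 * Report the core port capabilities: plain IB ports are IBA-IB; Ethernet
 * ports always support raw packet QPs, and additionally advertise RoCE v1
 * and/or RoCE v2 according to the device's l3_type and roce_version caps.
 */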
Achiad Shochate53505a2015-12-23 18:47:25 +02003458static u32 get_core_cap_flags(struct ib_device *ibdev)
3459{
3460 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3461 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
3462 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
3463 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
3464 u32 ret = 0;
3465
3466 if (ll == IB_LINK_LAYER_INFINIBAND)
3467 return RDMA_CORE_PORT_IBA_IB;
3468
Or Gerlitz72cd5712017-01-24 13:02:36 +02003469 ret = RDMA_CORE_PORT_RAW_PACKET;
3470
Achiad Shochate53505a2015-12-23 18:47:25 +02003471 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
Or Gerlitz72cd5712017-01-24 13:02:36 +02003472 return ret;
Achiad Shochate53505a2015-12-23 18:47:25 +02003473
3474 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
Or Gerlitz72cd5712017-01-24 13:02:36 +02003475 return ret;
Achiad Shochate53505a2015-12-23 18:47:25 +02003476
3477 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
3478 ret |= RDMA_CORE_PORT_IBA_ROCE;
3479
3480 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
3481 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3482
3483 return ret;
3484}
3485
Ira Weiny77386132015-05-13 20:02:58 -04003486static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
3487 struct ib_port_immutable *immutable)
3488{
3489 struct ib_port_attr attr;
Or Gerlitzca5b91d2016-11-27 16:51:36 +02003490 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3491 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
Ira Weiny77386132015-05-13 20:02:58 -04003492 int err;
3493
Or Gerlitzc4550c62017-01-24 13:02:39 +02003494 immutable->core_cap_flags = get_core_cap_flags(ibdev);
3495
3496 err = ib_query_port(ibdev, port_num, &attr);
Ira Weiny77386132015-05-13 20:02:58 -04003497 if (err)
3498 return err;
3499
3500 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3501 immutable->gid_tbl_len = attr.gid_tbl_len;
Achiad Shochate53505a2015-12-23 18:47:25 +02003502 immutable->core_cap_flags = get_core_cap_flags(ibdev);
Or Gerlitzca5b91d2016-11-27 16:51:36 +02003503 if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
3504 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Ira Weiny77386132015-05-13 20:02:58 -04003505
3506 return 0;
3507}
3508
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03003509static void get_dev_fw_str(struct ib_device *ibdev, char *str)
Ira Weinyc7342822016-06-15 02:22:01 -04003510{
3511 struct mlx5_ib_dev *dev =
3512 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03003513 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
3514 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3515 fw_rev_sub(dev->mdev));
Ira Weinyc7342822016-06-15 02:22:01 -04003516}
3517
Or Gerlitz45f95ac2016-11-27 16:51:35 +02003518static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
Aviv Heller9ef9c642016-09-18 20:48:01 +03003519{
3520 struct mlx5_core_dev *mdev = dev->mdev;
3521 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
3522 MLX5_FLOW_NAMESPACE_LAG);
3523 struct mlx5_flow_table *ft;
3524 int err;
3525
3526 if (!ns || !mlx5_lag_is_active(mdev))
3527 return 0;
3528
3529 err = mlx5_cmd_create_vport_lag(mdev);
3530 if (err)
3531 return err;
3532
3533 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
3534 if (IS_ERR(ft)) {
3535 err = PTR_ERR(ft);
3536 goto err_destroy_vport_lag;
3537 }
3538
3539 dev->flow_db.lag_demux_ft = ft;
3540 return 0;
3541
3542err_destroy_vport_lag:
3543 mlx5_cmd_destroy_vport_lag(mdev);
3544 return err;
3545}
3546
Or Gerlitz45f95ac2016-11-27 16:51:35 +02003547static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
Aviv Heller9ef9c642016-09-18 20:48:01 +03003548{
3549 struct mlx5_core_dev *mdev = dev->mdev;
3550
3551 if (dev->flow_db.lag_demux_ft) {
3552 mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
3553 dev->flow_db.lag_demux_ft = NULL;
3554
3555 mlx5_cmd_destroy_vport_lag(mdev);
3556 }
3557}
3558
Or Gerlitzd012f5d2016-11-27 16:51:34 +02003559static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
Achiad Shochatfc24fc52015-12-23 18:47:17 +02003560{
Achiad Shochate53505a2015-12-23 18:47:25 +02003561 int err;
3562
Achiad Shochatfc24fc52015-12-23 18:47:17 +02003563 dev->roce.nb.notifier_call = mlx5_netdev_event;
Achiad Shochate53505a2015-12-23 18:47:25 +02003564 err = register_netdevice_notifier(&dev->roce.nb);
Aviv Heller5ec8c832016-09-18 20:48:00 +03003565 if (err) {
3566 dev->roce.nb.notifier_call = NULL;
Achiad Shochate53505a2015-12-23 18:47:25 +02003567 return err;
Aviv Heller5ec8c832016-09-18 20:48:00 +03003568 }
Achiad Shochate53505a2015-12-23 18:47:25 +02003569
Or Gerlitzd012f5d2016-11-27 16:51:34 +02003570 return 0;
3571}
Achiad Shochate53505a2015-12-23 18:47:25 +02003572
Or Gerlitzd012f5d2016-11-27 16:51:34 +02003573static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
Eli Cohene126ba92013-07-07 17:25:49 +03003574{
3575 if (dev->roce.nb.notifier_call) {
3576 unregister_netdevice_notifier(&dev->roce.nb);
3577 dev->roce.nb.notifier_call = NULL;
3578 }
3579}
3580
Or Gerlitz45f95ac2016-11-27 16:51:35 +02003581static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
Eli Cohene126ba92013-07-07 17:25:49 +03003582{
Eli Cohene126ba92013-07-07 17:25:49 +03003583 int err;
3584
Or Gerlitzd012f5d2016-11-27 16:51:34 +02003585 err = mlx5_add_netdev_notifier(dev);
3586 if (err)
Achiad Shochate53505a2015-12-23 18:47:25 +02003587 return err;
Achiad Shochate53505a2015-12-23 18:47:25 +02003588
Or Gerlitzca5b91d2016-11-27 16:51:36 +02003589 if (MLX5_CAP_GEN(dev->mdev, roce)) {
3590 err = mlx5_nic_vport_enable_roce(dev->mdev);
3591 if (err)
3592 goto err_unregister_netdevice_notifier;
3593 }
Achiad Shochate53505a2015-12-23 18:47:25 +02003594
Or Gerlitz45f95ac2016-11-27 16:51:35 +02003595 err = mlx5_eth_lag_init(dev);
Aviv Heller9ef9c642016-09-18 20:48:01 +03003596 if (err)
3597 goto err_disable_roce;
3598
Achiad Shochate53505a2015-12-23 18:47:25 +02003599 return 0;
3600
Aviv Heller9ef9c642016-09-18 20:48:01 +03003601err_disable_roce:
Or Gerlitzca5b91d2016-11-27 16:51:36 +02003602 if (MLX5_CAP_GEN(dev->mdev, roce))
3603 mlx5_nic_vport_disable_roce(dev->mdev);
Aviv Heller9ef9c642016-09-18 20:48:01 +03003604
Achiad Shochate53505a2015-12-23 18:47:25 +02003605err_unregister_netdevice_notifier:
Or Gerlitzd012f5d2016-11-27 16:51:34 +02003606 mlx5_remove_netdev_notifier(dev);
Achiad Shochate53505a2015-12-23 18:47:25 +02003607 return err;
Achiad Shochatfc24fc52015-12-23 18:47:17 +02003608}
3609
Or Gerlitz45f95ac2016-11-27 16:51:35 +02003610static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
Achiad Shochatfc24fc52015-12-23 18:47:17 +02003611{
Or Gerlitz45f95ac2016-11-27 16:51:35 +02003612 mlx5_eth_lag_cleanup(dev);
Or Gerlitzca5b91d2016-11-27 16:51:36 +02003613 if (MLX5_CAP_GEN(dev->mdev, roce))
3614 mlx5_nic_vport_disable_roce(dev->mdev);
Achiad Shochatfc24fc52015-12-23 18:47:17 +02003615}
3616
Parav Pandite1f24a72017-04-16 07:29:29 +03003617struct mlx5_ib_counter {
Kamal Heib7c16f472017-01-18 15:25:09 +02003618 const char *name;
3619 size_t offset;
3620};
3621
3622#define INIT_Q_COUNTER(_name) \
3623 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
3624
Parav Pandite1f24a72017-04-16 07:29:29 +03003625static const struct mlx5_ib_counter basic_q_cnts[] = {
Kamal Heib7c16f472017-01-18 15:25:09 +02003626 INIT_Q_COUNTER(rx_write_requests),
3627 INIT_Q_COUNTER(rx_read_requests),
3628 INIT_Q_COUNTER(rx_atomic_requests),
3629 INIT_Q_COUNTER(out_of_buffer),
3630};
3631
Parav Pandite1f24a72017-04-16 07:29:29 +03003632static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
Kamal Heib7c16f472017-01-18 15:25:09 +02003633 INIT_Q_COUNTER(out_of_sequence),
3634};
3635
Parav Pandite1f24a72017-04-16 07:29:29 +03003636static const struct mlx5_ib_counter retrans_q_cnts[] = {
Kamal Heib7c16f472017-01-18 15:25:09 +02003637 INIT_Q_COUNTER(duplicate_request),
3638 INIT_Q_COUNTER(rnr_nak_retry_err),
3639 INIT_Q_COUNTER(packet_seq_err),
3640 INIT_Q_COUNTER(implied_nak_seq_err),
3641 INIT_Q_COUNTER(local_ack_timeout_err),
3642};
3643
Parav Pandite1f24a72017-04-16 07:29:29 +03003644#define INIT_CONG_COUNTER(_name) \
3645 { .name = #_name, .offset = \
3646 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
3647
3648static const struct mlx5_ib_counter cong_cnts[] = {
3649 INIT_CONG_COUNTER(rp_cnp_ignored),
3650 INIT_CONG_COUNTER(rp_cnp_handled),
3651 INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
3652 INIT_CONG_COUNTER(np_cnp_sent),
3653};
3654
Parav Pandit58dcb602017-06-19 07:19:37 +03003655static const struct mlx5_ib_counter extended_err_cnts[] = {
3656 INIT_Q_COUNTER(resp_local_length_error),
3657 INIT_Q_COUNTER(resp_cqe_error),
3658 INIT_Q_COUNTER(req_cqe_error),
3659 INIT_Q_COUNTER(req_remote_invalid_request),
3660 INIT_Q_COUNTER(req_remote_access_errors),
3661 INIT_Q_COUNTER(resp_remote_access_errors),
3662 INIT_Q_COUNTER(resp_cqe_flush_error),
3663 INIT_Q_COUNTER(req_cqe_flush_error),
3664};
3665
Parav Pandite1f24a72017-04-16 07:29:29 +03003666static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
Mark Bloch0837e862016-06-17 15:10:55 +03003667{
3668 unsigned int i;
3669
Kamal Heib7c16f472017-01-18 15:25:09 +02003670 for (i = 0; i < dev->num_ports; i++) {
Mark Bloch0837e862016-06-17 15:10:55 +03003671 mlx5_core_dealloc_q_counter(dev->mdev,
Parav Pandite1f24a72017-04-16 07:29:29 +03003672 dev->port[i].cnts.set_id);
3673 kfree(dev->port[i].cnts.names);
3674 kfree(dev->port[i].cnts.offsets);
Kamal Heib7c16f472017-01-18 15:25:09 +02003675 }
3676}
3677
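/*
 * Size and allocate the per-port counter name/offset arrays. The basic Q
 * counters are always present; out-of-sequence, retransmission and extended
 * error counters are added only when the firmware advertises them, and the
 * congestion counters only when cc_query_allowed is set.
 */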
Parav Pandite1f24a72017-04-16 07:29:29 +03003678static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
3679 struct mlx5_ib_counters *cnts)
Kamal Heib7c16f472017-01-18 15:25:09 +02003680{
3681 u32 num_counters;
3682
3683 num_counters = ARRAY_SIZE(basic_q_cnts);
3684
3685 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
3686 num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
3687
3688 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
3689 num_counters += ARRAY_SIZE(retrans_q_cnts);
Parav Pandit58dcb602017-06-19 07:19:37 +03003690
3691 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
3692 num_counters += ARRAY_SIZE(extended_err_cnts);
3693
Parav Pandite1f24a72017-04-16 07:29:29 +03003694 cnts->num_q_counters = num_counters;
Kamal Heib7c16f472017-01-18 15:25:09 +02003695
Parav Pandite1f24a72017-04-16 07:29:29 +03003696 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
3697 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
3698 num_counters += ARRAY_SIZE(cong_cnts);
3699 }
3700
3701 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
3702 if (!cnts->names)
Kamal Heib7c16f472017-01-18 15:25:09 +02003703 return -ENOMEM;
3704
Parav Pandite1f24a72017-04-16 07:29:29 +03003705 cnts->offsets = kcalloc(num_counters,
3706 sizeof(cnts->offsets), GFP_KERNEL);
3707 if (!cnts->offsets)
Kamal Heib7c16f472017-01-18 15:25:09 +02003708 goto err_names;
3709
Kamal Heib7c16f472017-01-18 15:25:09 +02003710 return 0;
3711
3712err_names:
Parav Pandite1f24a72017-04-16 07:29:29 +03003713 kfree(cnts->names);
Kamal Heib7c16f472017-01-18 15:25:09 +02003714 return -ENOMEM;
3715}
3716
Parav Pandite1f24a72017-04-16 07:29:29 +03003717static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
3718 const char **names,
3719 size_t *offsets)
Kamal Heib7c16f472017-01-18 15:25:09 +02003720{
3721 int i;
3722 int j = 0;
3723
3724 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
3725 names[j] = basic_q_cnts[i].name;
3726 offsets[j] = basic_q_cnts[i].offset;
3727 }
3728
3729 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
3730 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
3731 names[j] = out_of_seq_q_cnts[i].name;
3732 offsets[j] = out_of_seq_q_cnts[i].offset;
3733 }
3734 }
3735
3736 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
3737 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
3738 names[j] = retrans_q_cnts[i].name;
3739 offsets[j] = retrans_q_cnts[i].offset;
3740 }
3741 }
Parav Pandite1f24a72017-04-16 07:29:29 +03003742
Parav Pandit58dcb602017-06-19 07:19:37 +03003743 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
3744 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
3745 names[j] = extended_err_cnts[i].name;
3746 offsets[j] = extended_err_cnts[i].offset;
3747 }
3748 }
3749
Parav Pandite1f24a72017-04-16 07:29:29 +03003750 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
3751 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
3752 names[j] = cong_cnts[i].name;
3753 offsets[j] = cong_cnts[i].offset;
3754 }
3755 }
Mark Bloch0837e862016-06-17 15:10:55 +03003756}
3757
Parav Pandite1f24a72017-04-16 07:29:29 +03003758static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
Mark Bloch0837e862016-06-17 15:10:55 +03003759{
3760 int i;
3761 int ret;
3762
3763 for (i = 0; i < dev->num_ports; i++) {
Kamal Heib7c16f472017-01-18 15:25:09 +02003764 struct mlx5_ib_port *port = &dev->port[i];
3765
Mark Bloch0837e862016-06-17 15:10:55 +03003766 ret = mlx5_core_alloc_q_counter(dev->mdev,
Parav Pandite1f24a72017-04-16 07:29:29 +03003767 &port->cnts.set_id);
Mark Bloch0837e862016-06-17 15:10:55 +03003768 if (ret) {
3769 mlx5_ib_warn(dev,
3770 "couldn't allocate queue counter for port %d, err %d\n",
3771 i + 1, ret);
3772 goto dealloc_counters;
3773 }
Kamal Heib7c16f472017-01-18 15:25:09 +02003774
Parav Pandite1f24a72017-04-16 07:29:29 +03003775 ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
Kamal Heib7c16f472017-01-18 15:25:09 +02003776 if (ret)
3777 goto dealloc_counters;
3778
Parav Pandite1f24a72017-04-16 07:29:29 +03003779 mlx5_ib_fill_counters(dev, port->cnts.names,
3780 port->cnts.offsets);
Mark Bloch0837e862016-06-17 15:10:55 +03003781 }
3782
3783 return 0;
3784
3785dealloc_counters:
3786 while (--i >= 0)
3787 mlx5_core_dealloc_q_counter(dev->mdev,
Parav Pandite1f24a72017-04-16 07:29:29 +03003788 dev->port[i].cnts.set_id);
Mark Bloch0837e862016-06-17 15:10:55 +03003789
3790 return ret;
3791}
3792
Mark Bloch0ad17a82016-06-17 15:10:56 +03003793static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
3794 u8 port_num)
3795{
Kamal Heib7c16f472017-01-18 15:25:09 +02003796 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3797 struct mlx5_ib_port *port = &dev->port[port_num - 1];
Mark Bloch0ad17a82016-06-17 15:10:56 +03003798
 3799	/* We support only per-port stats */
3800 if (port_num == 0)
3801 return NULL;
3802
Parav Pandite1f24a72017-04-16 07:29:29 +03003803 return rdma_alloc_hw_stats_struct(port->cnts.names,
3804 port->cnts.num_q_counters +
3805 port->cnts.num_cong_counters,
Mark Bloch0ad17a82016-06-17 15:10:56 +03003806 RDMA_HW_STATS_DEFAULT_LIFESPAN);
3807}
3808
Parav Pandite1f24a72017-04-16 07:29:29 +03003809static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
3810 struct mlx5_ib_port *port,
3811 struct rdma_hw_stats *stats)
3812{
3813 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
3814 void *out;
3815 __be32 val;
3816 int ret, i;
3817
Leon Romanovsky1b9a07e2017-05-10 21:32:18 +03003818 out = kvzalloc(outlen, GFP_KERNEL);
Parav Pandite1f24a72017-04-16 07:29:29 +03003819 if (!out)
3820 return -ENOMEM;
3821
3822 ret = mlx5_core_query_q_counter(dev->mdev,
3823 port->cnts.set_id, 0,
3824 out, outlen);
3825 if (ret)
3826 goto free;
3827
3828 for (i = 0; i < port->cnts.num_q_counters; i++) {
3829 val = *(__be32 *)(out + port->cnts.offsets[i]);
3830 stats->value[i] = (u64)be32_to_cpu(val);
3831 }
3832
3833free:
3834 kvfree(out);
3835 return ret;
3836}
3837
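/*
 * Fill the rdma_hw_stats array for a port: the Q counters are read through
 * the port's queue counter set and, when cc_query_allowed is set, the
 * congestion counters are read via the LAG-aware query helper. Returns the
 * number of counters filled.
 */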
Mark Bloch0ad17a82016-06-17 15:10:56 +03003838static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
3839 struct rdma_hw_stats *stats,
Kamal Heib7c16f472017-01-18 15:25:09 +02003840 u8 port_num, int index)
Mark Bloch0ad17a82016-06-17 15:10:56 +03003841{
3842 struct mlx5_ib_dev *dev = to_mdev(ibdev);
Kamal Heib7c16f472017-01-18 15:25:09 +02003843 struct mlx5_ib_port *port = &dev->port[port_num - 1];
Parav Pandite1f24a72017-04-16 07:29:29 +03003844 int ret, num_counters;
Mark Bloch0ad17a82016-06-17 15:10:56 +03003845
Kamal Heib7c16f472017-01-18 15:25:09 +02003846 if (!stats)
Parav Pandite1f24a72017-04-16 07:29:29 +03003847 return -EINVAL;
Mark Bloch0ad17a82016-06-17 15:10:56 +03003848
Parav Pandite1f24a72017-04-16 07:29:29 +03003849 ret = mlx5_ib_query_q_counters(dev, port, stats);
Mark Bloch0ad17a82016-06-17 15:10:56 +03003850 if (ret)
Parav Pandite1f24a72017-04-16 07:29:29 +03003851 return ret;
3852 num_counters = port->cnts.num_q_counters;
Mark Bloch0ad17a82016-06-17 15:10:56 +03003853
Parav Pandite1f24a72017-04-16 07:29:29 +03003854 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
Majd Dibbiny71a0ff62017-12-21 17:38:26 +02003855 ret = mlx5_lag_query_cong_counters(dev->mdev,
3856 stats->value +
3857 port->cnts.num_q_counters,
3858 port->cnts.num_cong_counters,
3859 port->cnts.offsets +
3860 port->cnts.num_q_counters);
Parav Pandite1f24a72017-04-16 07:29:29 +03003861 if (ret)
3862 return ret;
3863 num_counters += port->cnts.num_cong_counters;
Mark Bloch0ad17a82016-06-17 15:10:56 +03003864 }
Kamal Heib7c16f472017-01-18 15:25:09 +02003865
Parav Pandite1f24a72017-04-16 07:29:29 +03003866 return num_counters;
Mark Bloch0ad17a82016-06-17 15:10:56 +03003867}
3868
Niranjana Vishwanathapura8e959602017-06-30 13:14:46 -07003869static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
3870{
3871 return mlx5_rdma_netdev_free(netdev);
3872}
3873
Erez Shitrit693dfd52017-04-27 17:01:34 +03003874static struct net_device*
3875mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
3876 u8 port_num,
3877 enum rdma_netdev_t type,
3878 const char *name,
3879 unsigned char name_assign_type,
3880 void (*setup)(struct net_device *))
3881{
Niranjana Vishwanathapura8e959602017-06-30 13:14:46 -07003882 struct net_device *netdev;
3883 struct rdma_netdev *rn;
3884
Erez Shitrit693dfd52017-04-27 17:01:34 +03003885 if (type != RDMA_NETDEV_IPOIB)
3886 return ERR_PTR(-EOPNOTSUPP);
3887
Niranjana Vishwanathapura8e959602017-06-30 13:14:46 -07003888 netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
3889 name, setup);
3890 if (likely(!IS_ERR_OR_NULL(netdev))) {
3891 rn = netdev_priv(netdev);
3892 rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
3893 }
3894 return netdev;
Erez Shitrit693dfd52017-04-27 17:01:34 +03003895}
3896
Maor Gottliebfe248c32017-05-30 10:29:14 +03003897static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
3898{
3899 if (!dev->delay_drop.dbg)
3900 return;
3901 debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
3902 kfree(dev->delay_drop.dbg);
3903 dev->delay_drop.dbg = NULL;
3904}
3905
Maor Gottlieb03404e82017-05-30 10:29:13 +03003906static void cancel_delay_drop(struct mlx5_ib_dev *dev)
3907{
3908 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
3909 return;
3910
3911 cancel_work_sync(&dev->delay_drop.delay_drop_work);
Maor Gottliebfe248c32017-05-30 10:29:14 +03003912 delay_drop_debugfs_cleanup(dev);
3913}
3914
3915static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3916 size_t count, loff_t *pos)
3917{
3918 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3919 char lbuf[20];
3920 int len;
3921
3922 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3923 return simple_read_from_buffer(buf, count, pos, lbuf, len);
3924}
3925
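/*
 * debugfs write handler for the delay-drop timeout: the value is taken in
 * microseconds, rounded up to 100 usec granularity and clamped to
 * MLX5_MAX_DELAY_DROP_TIMEOUT_MS. For example, writing 250 results in a
 * timeout of 300 usec.
 */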
3926static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3927 size_t count, loff_t *pos)
3928{
3929 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3930 u32 timeout;
3931 u32 var;
3932
3933 if (kstrtouint_from_user(buf, count, 0, &var))
3934 return -EFAULT;
3935
3936 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3937 1000);
3938 if (timeout != var)
3939 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3940 timeout);
3941
3942 delay_drop->timeout = timeout;
3943
3944 return count;
3945}
3946
3947static const struct file_operations fops_delay_drop_timeout = {
3948 .owner = THIS_MODULE,
3949 .open = simple_open,
3950 .write = delay_drop_timeout_write,
3951 .read = delay_drop_timeout_read,
3952};
3953
3954static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
3955{
3956 struct mlx5_ib_dbg_delay_drop *dbg;
3957
3958 if (!mlx5_debugfs_root)
3959 return 0;
3960
3961 dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
3962 if (!dbg)
3963 return -ENOMEM;
3964
Sudip Mukherjeecbafad82017-09-18 12:28:48 +01003965 dev->delay_drop.dbg = dbg;
3966
Maor Gottliebfe248c32017-05-30 10:29:14 +03003967 dbg->dir_debugfs =
3968 debugfs_create_dir("delay_drop",
3969 dev->mdev->priv.dbg_root);
3970 if (!dbg->dir_debugfs)
Sudip Mukherjeecbafad82017-09-18 12:28:48 +01003971 goto out_debugfs;
Maor Gottliebfe248c32017-05-30 10:29:14 +03003972
3973 dbg->events_cnt_debugfs =
3974 debugfs_create_atomic_t("num_timeout_events", 0400,
3975 dbg->dir_debugfs,
3976 &dev->delay_drop.events_cnt);
3977 if (!dbg->events_cnt_debugfs)
3978 goto out_debugfs;
3979
3980 dbg->rqs_cnt_debugfs =
3981 debugfs_create_atomic_t("num_rqs", 0400,
3982 dbg->dir_debugfs,
3983 &dev->delay_drop.rqs_cnt);
3984 if (!dbg->rqs_cnt_debugfs)
3985 goto out_debugfs;
3986
3987 dbg->timeout_debugfs =
3988 debugfs_create_file("timeout", 0600,
3989 dbg->dir_debugfs,
3990 &dev->delay_drop,
3991 &fops_delay_drop_timeout);
3992 if (!dbg->timeout_debugfs)
3993 goto out_debugfs;
3994
3995 return 0;
3996
3997out_debugfs:
3998 delay_drop_debugfs_cleanup(dev);
3999 return -ENOMEM;
Maor Gottlieb03404e82017-05-30 10:29:13 +03004000}
4001
4002static void init_delay_drop(struct mlx5_ib_dev *dev)
4003{
4004 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4005 return;
4006
4007 mutex_init(&dev->delay_drop.lock);
4008 dev->delay_drop.dev = dev;
4009 dev->delay_drop.activate = false;
4010 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4011 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
Maor Gottliebfe248c32017-05-30 10:29:14 +03004012 atomic_set(&dev->delay_drop.rqs_cnt, 0);
4013 atomic_set(&dev->delay_drop.events_cnt, 0);
4014
4015 if (delay_drop_debugfs_init(dev))
4016 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
Maor Gottlieb03404e82017-05-30 10:29:13 +03004017}
4018
Leon Romanovsky84305d712017-08-17 15:50:53 +03004019static const struct cpumask *
4020mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
Sagi Grimberg40b24402017-07-13 11:09:42 +03004021{
4022 struct mlx5_ib_dev *dev = to_mdev(ibdev);
4023
4024 return mlx5_get_vector_affinity(dev->mdev, comp_vector);
4025}
4026
Mark Bloch16c19752018-01-01 13:06:58 +02004027static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
Eli Cohene126ba92013-07-07 17:25:49 +03004028{
Mark Bloch16c19752018-01-01 13:06:58 +02004029 kfree(dev->port);
4030}
4031
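/*
 * First stage of device initialization: allocate the per-port array, query
 * port capabilities, pick the device name ("mlx5_%d", or "mlx5_bond_%d" when
 * LAG is active) and fill the basic ib_device fields from the core device.
 */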
4032static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
4033{
4034 struct mlx5_core_dev *mdev = dev->mdev;
Aviv Heller4babcf92016-09-18 20:48:03 +03004035 const char *name;
Eli Cohene126ba92013-07-07 17:25:49 +03004036 int err;
Eli Cohene126ba92013-07-07 17:25:49 +03004037
Mark Bloch0837e862016-06-17 15:10:55 +03004038 dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
4039 GFP_KERNEL);
4040 if (!dev->port)
Mark Bloch16c19752018-01-01 13:06:58 +02004041 return -ENOMEM;
Mark Bloch0837e862016-06-17 15:10:55 +03004042
Achiad Shochatfc24fc52015-12-23 18:47:17 +02004043 rwlock_init(&dev->roce.netdev_lock);
Eli Cohene126ba92013-07-07 17:25:49 +03004044 err = get_port_caps(dev);
4045 if (err)
Mark Bloch0837e862016-06-17 15:10:55 +03004046 goto err_free_port;
Eli Cohene126ba92013-07-07 17:25:49 +03004047
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03004048 if (mlx5_use_mad_ifc(dev))
4049 get_ext_port_caps(dev);
Eli Cohene126ba92013-07-07 17:25:49 +03004050
Aviv Heller4babcf92016-09-18 20:48:03 +03004051 if (!mlx5_lag_is_active(mdev))
4052 name = "mlx5_%d";
4053 else
4054 name = "mlx5_bond_%d";
4055
4056 strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
Eli Cohene126ba92013-07-07 17:25:49 +03004057 dev->ib_dev.owner = THIS_MODULE;
4058 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
Sagi Grimbergc6790aa2015-09-24 10:34:23 +03004059 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
Saeed Mahameed938fe832015-05-28 22:28:41 +03004060 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
Eli Cohene126ba92013-07-07 17:25:49 +03004061 dev->ib_dev.phys_port_cnt = dev->num_ports;
Saeed Mahameed233d05d2015-04-02 17:07:32 +03004062 dev->ib_dev.num_comp_vectors =
4063 dev->mdev->priv.eq_table.num_comp_vectors;
Bart Van Assche9b0c2892017-01-20 13:04:21 -08004064 dev->ib_dev.dev.parent = &mdev->pdev->dev;
Eli Cohene126ba92013-07-07 17:25:49 +03004065
Mark Bloch16c19752018-01-01 13:06:58 +02004066 return 0;
4067
4068err_free_port:
4069 kfree(dev->port);
4070
4071 return -ENOMEM;
4072}
4073
static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.query_port		= mlx5_ib_query_port;
	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
	dev->ib_dev.del_gid		= mlx5_ib_del_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr	= mlx5_ib_rereg_user_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
	dev->ib_dev.get_vector_affinity	= mlx5_ib_get_vector_affinity;
	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;

	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats	= mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid		= mlx5_ib_set_vf_guid;
	}

	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;

	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	dev->ib_dev.create_flow	= mlx5_ib_create_flow;
	dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
	dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);

	err = init_node_data(dev);
	if (err)
		return err;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	return 0;
}

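/*
 * RoCE stage: when the port link layer is Ethernet, register the WQ and
 * RWQ indirection table verbs and bring up the Ethernet/RoCE side via
 * mlx5_enable_eth().
 */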
static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
		dev->ib_dev.create_wq	= mlx5_ib_create_wq;
		dev->ib_dev.modify_wq	= mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq	= mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
		err = mlx5_enable_eth(dev);
		if (err)
			return err;
		dev->roce.last_port_state = IB_PORT_DOWN;
	}

	return 0;
}

static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_remove_netdev_notifier(dev);
	}
}

static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
	return create_dev_resources(&dev->devr);
}

static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_dev_resources(&dev->devr);
}

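/*
 * ODP stage: fill in the on-demand paging capabilities and initialize
 * the ODP (page-fault handling) support for this device.
 */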
static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_internal_fill_odp_caps(dev);

	return mlx5_ib_odp_init_one(dev);
}

static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_odp_remove_one(dev);
}

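/*
 * Counters stage: hardware statistics hooks are registered only when the
 * device reports a non-zero max_qp_cnt capability.
 */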
static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;

		return mlx5_ib_alloc_counters(dev);
	}

	return 0;
}

static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_counters(dev);
}

static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
{
	return mlx5_ib_init_cong_debugfs(dev);
}

static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_cong_debugfs(dev);
}

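/*
 * UAR stage: obtain a UAR page from the core device; it is released
 * again in the cleanup callback.
 */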
static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
{
	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	if (!dev->mdev->priv.uar)
		return -ENOMEM;
	return 0;
}

static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}

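/*
 * BFREG stage: allocate the regular and fast-path blue-flame registers
 * (bfregs) used by the driver.
 */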
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		return err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		/* fp_bfreg was never allocated; release the bfreg above */
		mlx5_free_bfreg(dev->mdev, &dev->bfreg);

	return err;
}

static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}

static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
	return ib_register_device(&dev->ib_dev, NULL);
}

static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}

static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
{
	return create_umr_res(dev);
}

static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_umrc_res(dev);
}

static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
{
	init_delay_drop(dev);

	return 0;
}

static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
{
	cancel_delay_drop(dev);
}

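/* Create the sysfs class attribute files for the IB device. */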
static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			return err;
	}

	return 0;
}

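/*
 * The loopback mutex is only needed on Ethernet ports of devices that can
 * disable local loopback.
 */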
static int mlx5_ib_stage_loopback_init(struct mlx5_ib_dev *dev)
{
	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	    MLX5_CAP_GEN(dev->mdev, disable_local_lb))
		mutex_init(&dev->lb_mutex);

	return 0;
}

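/*
 * Tear down a device by running the cleanup callback of every stage that
 * was initialized, in reverse order, and then free the ib_device.
 */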
static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
			     const struct mlx5_ib_profile *profile,
			     int stage)
{
	/* Number of stages to cleanup */
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}

	ib_dealloc_device((struct ib_device *)dev);
}

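/*
 * Allocate an ib_device and run the init callback of each stage of the
 * profile in order.  If any stage fails, the stages already completed are
 * unwound via __mlx5_ib_remove().
 */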
static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
			   const struct mlx5_ib_profile *profile)
{
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}

	dev->profile = profile;
	dev->ib_active = true;

	return dev;

err_out:
	__mlx5_ib_remove(dev, profile, i);

	return NULL;
}

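/*
 * Initialization profile for a physical function: each STAGE_CREATE()
 * entry pairs an init callback with its matching cleanup callback (NULL
 * where no cleanup is required).
 */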
static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_roce_init,
		     mlx5_ib_stage_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_ODP,
		     mlx5_ib_stage_odp_init,
		     mlx5_ib_stage_odp_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
		     mlx5_ib_stage_cong_debugfs_init,
		     mlx5_ib_stage_cong_debugfs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
		     mlx5_ib_stage_umr_res_init,
		     mlx5_ib_stage_umr_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
		     mlx5_ib_stage_delay_drop_init,
		     mlx5_ib_stage_delay_drop_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
		     mlx5_ib_stage_class_attr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_LOOPBACK,
		     mlx5_ib_stage_loopback_init,
		     NULL),
};

static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	return __mlx5_ib_add(mdev, &pf_profile);
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;

	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

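/*
 * Registration with the mlx5 core driver: add/remove are called when a
 * core device is probed or removed, event forwards asynchronous device
 * events, and pfault handles ODP page faults when enabled.
 */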
static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.event		= mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	.pfault		= mlx5_ib_pfault,
#endif
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init mlx5_ib_init(void)
{
	int err;

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);

	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);