/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE "Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

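/*
 * Netdev notifier: tracks the net_device that backs the RoCE port and
 * translates carrier up/down on that device (or on its LAG master) into
 * IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR events on port 1.
 */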
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
	if (ndev)
		return ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

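/*
 * RoCE ports have no subnet manager, so the IB port attributes are
 * derived from the backing net_device (or its LAG master): link state
 * and carrier give the port state, and the netdev MTU bounds the
 * active IB MTU.
 */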
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);

	props->active_width = IB_WIDTH_4X;  /* TODO */
	props->active_speed = IB_SPEED_QDR; /* TODO */

	return 0;
}

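/*
 * Pack a GID and its attributes into the firmware roce_addr_layout:
 * MAC and VLAN come from the attached net_device, the RoCE version
 * from the GID type, and v4-mapped GIDs are stored as IPv4 addresses.
 */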
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     const struct ib_gid_attr *attr,
				     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					   source_mac_47_32);

	if (!gid)
		return;

	ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);

	if (is_vlan_dev(attr->ndev)) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
	}

	switch (attr->gid_type) {
	case IB_GID_TYPE_IB:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
		break;

	default:
		WARN_ON(true);
	}

	if (attr->gid_type != IB_GID_TYPE_IB) {
		if (ipv6_addr_v4mapped((void *)gid))
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV4);
		else
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV6);
	}

	if ((attr->gid_type == IB_GID_TYPE_IB) ||
	    !ipv6_addr_v4mapped((void *)gid))
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	else
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
			 unsigned int index,
			 const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

	if (ll != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index)
{
	struct ib_gid_attr attr;
	union ib_gid gid;

	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
		return 0;

	if (!attr.ndev)
		return 0;

	dev_put(attr.ndev);

	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type)
{
	struct ib_gid_attr attr;
	union ib_gid gid;
	int ret;

	ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (!attr.ndev)
		return -ENODEV;

	dev_put(attr.ndev);

	*gid_type = attr.gid_type;

	return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

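/*
 * Device and port attributes can be queried through three paths: MADs
 * (IB ports without virtualization support), the HCA vport commands,
 * or the NIC vport commands (Ethernet/RoCE ports).
 */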
enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

	/* Check if HW supports 8 bytes standard atomic operations and is
	 * capable of responding in host endianness
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

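/*
 * Fill ib_device_attr from the firmware capability pages; when the
 * caller passes a uhw buffer, mlx5-specific capabilities (TSO, RSS,
 * CQE compression, packet pacing, etc.) are returned in a vendor
 * response sized to what userspace can accept.
 */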
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.cqe_comp_caps.max_num =
			MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
			MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
		resp.cqe_comp_caps.supported_format =
			MLX5_IB_CQE_RES_FORMAT_HASH |
			MLX5_IB_CQE_RES_FORMAT_CSUM;
		resp.response_length += sizeof(resp.cqe_comp_caps);
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		resp.mlx5_ib_support_multi_pkt_send_wqes =
			MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), reserved, uhw->outlen))
		resp.response_length += sizeof(resp.reserved);

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

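/*
 * Firmware reports the active link width as a bitmask; translate it to
 * the matching enum ib_port_width value. A 2X width has no IB spec
 * equivalent and is rejected.
 */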
enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

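/*
 * Native IB port query: read the HCA vport context and the port's
 * operational width, speed, and MTU, then translate the raw firmware
 * values into ib_port_attr fields.
 */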
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;
	props->grh_required	= rep->grh_required;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_port_roce(ibdev, port, props);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

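/*
 * Update the port capability mask through the HCA vport context:
 * cap_mask1_perm selects exactly which bits firmware modifies, so the
 * read-modify-write race of the legacy path is avoided.
 */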
static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	int err;

	err = mlx5_query_hca_vport_context(dev->mdev, 0,
					   port_num, 0, &ctx);
	if (err)
		return err;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		return -EINVAL;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
						 port_num, 0, &ctx);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

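/*
 * Blue-flame registers are handed to userspace in units of system
 * pages: round the requested count up to a whole number of pages and
 * report how many firmware UAR pages are needed to back them.
 */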
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     u32 *num_sys_pages)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	*num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;

	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, *num_sys_pages);

	return 0;
}

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}
	return 0;

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++) {
		err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
		if (err) {
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
			return err;
		}
	}
	return 0;
}

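/*
 * Allocate a verbs user context: negotiate the request/response layout
 * with userspace (v0 vs. v2, 4K UARs), allocate the UAR pages and
 * bfreg bookkeeping, and open a transport domain when the device
 * supports one.
 */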
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_ib_ucontext *context;
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t reqlen;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	bool lib_uar_4k;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
		return ERR_PTR(-EINVAL);

	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen >= min_req_v2)
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.flags)
		return ERR_PTR(-EINVAL);

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return ERR_PTR(-EINVAL);

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	context->upd_xlt_page = __get_free_page(GFP_KERNEL);
	if (!context->upd_xlt_page) {
		err = -ENOMEM;
		goto out_uars;
	}
	mutex_init(&context->upd_xlt_page_mutex);

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
		err = mlx5_core_alloc_transport_domain(dev->mdev,
						       &context->tdn);
		if (err)
			goto out_page;
	}

	INIT_LIST_HEAD(&context->vma_private_list);
	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
			resp.eth_min_inline++;
		}
		resp.response_length += sizeof(resp.eth_min_inline);
	}

	/*
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
	 * pretend we don't support reading the HCA's core clock. This is also
	 * forced by mmap function.
	 */
	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
		if (PAGE_SIZE <= 4096) {
			resp.comp_mask |=
				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset =
				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
		}
		resp.response_length += sizeof(resp.hca_core_clock_offset) +
					sizeof(resp.reserved2);
	}

	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
		resp.response_length += sizeof(resp.log_uar_size);

	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
		resp.response_length += sizeof(resp.num_uars_per_page);

	err = ib_copy_to_udata(udata, &resp, resp.response_length);
	if (err)
		goto out_td;

	bfregi->ver = ver;
	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
	context->cqe_version = resp.cqe_version;
	context->lib_caps = req.lib_caps;
	print_lib_caps(dev, context->lib_caps);

	return &context->ibucontext;

out_td:
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

out_page:
	free_page(context->upd_xlt_page);

out_uars:
	deallocate_uars(dev, context);

out_sys_pages:
	kfree(bfregi->sys_pages);

out_count:
	kfree(bfregi->count);

out_ctx:
	kfree(context);

	return ERR_PTR(err);
}

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_bfreg_info *bfregi;

	bfregi = &context->bfregi;
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

	free_page(context->upd_xlt_page);
	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);
	kfree(context);

	return 0;
}

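/*
 * Translate a driver-level UAR index into a PFN for remapping. In
 * 4K-UAR mode several firmware UARs share one system page, so the
 * index is scaled accordingly.
 */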
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi,
				 int idx)
{
	int fw_uars_per_page;

	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;

	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
		bfregi->sys_pages[idx] / fw_uars_per_page;
}

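/*
 * The mmap offset encodes a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT and a command-specific argument (e.g. a UAR
 * index) in the bits below it.
 */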
static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001381static void mlx5_ib_vma_open(struct vm_area_struct *area)
1382{
1383 /* vma_open is called when a new VMA is created on top of our VMA. This
1384 * is done through either mremap flow or split_vma (usually due to
1385 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
1386 * as this VMA is strongly hardware related. Therefore we set the
1387 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1388 * calling us again and trying to do incorrect actions. We assume that
1389 * the original VMA size is exactly a single page, and therefore all
1390 * "splitting" operation will not happen to it.
1391 */
1392 area->vm_ops = NULL;
1393}
1394
1395static void mlx5_ib_vma_close(struct vm_area_struct *area)
1396{
1397 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1398
1399 /* It's guaranteed that all VMAs opened on a FD are closed before the
1400 * file itself is closed, therefore no sync is needed with the regular
1401 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
1402 * However need a sync with accessing the vma as part of
1403 * mlx5_ib_disassociate_ucontext.
1404 * The close operation is usually called under mm->mmap_sem except when
1405 * process is exiting.
1406 * The exiting case is handled explicitly as part of
1407 * mlx5_ib_disassociate_ucontext.
1408 */
1409 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1410
1411 /* setting the vma context pointer to null in the mlx5_ib driver's
1412 * private data, to protect a race condition in
1413 * mlx5_ib_disassociate_ucontext().
1414 */
1415 mlx5_ib_vma_priv_data->vma = NULL;
1416 list_del(&mlx5_ib_vma_priv_data->list);
1417 kfree(mlx5_ib_vma_priv_data);
1418}
1419
1420static const struct vm_operations_struct mlx5_ib_vm_ops = {
1421 .open = mlx5_ib_vma_open,
1422 .close = mlx5_ib_vma_close
1423};
1424
1425static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1426 struct mlx5_ib_ucontext *ctx)
1427{
1428 struct mlx5_ib_vma_private_data *vma_prv;
1429 struct list_head *vma_head = &ctx->vma_private_list;
1430
1431 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1432 if (!vma_prv)
1433 return -ENOMEM;
1434
1435 vma_prv->vma = vma;
1436 vma->vm_private_data = vma_prv;
1437 vma->vm_ops = &mlx5_ib_vm_ops;
1438
1439 list_add(&vma_prv->list, vma_head);
1440
1441 return 0;
1442}
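
/*
 * Bookkeeping summary (informational): every VMA handed out by uar_mmap()
 * below is linked into ctx->vma_private_list.  It leaves the list in one
 * of two ways: mlx5_ib_vma_close() when userspace unmaps it, or
 * mlx5_ib_disassociate_ucontext() when the device goes away while the
 * mapping is still live.  Both paths free the tracking node, which is why
 * close() only has to NULL the back-pointer that disassociate reads.
 */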

static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int ret;
	struct vm_area_struct *vma;
	struct mlx5_ib_vma_private_data *vma_private, *n;
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* If the task is dead we still hold a
				 * reference on its task struct and must
				 * release it.
				 */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* Protect against a race with mlx5_ib_vma_close(), which also
	 * tears down these VMAs.
	 */
	down_write(&owning_mm->mmap_sem);
	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
				 list) {
		vma = vma_private->vma;
		ret = zap_vma_ptes(vma, vma->vm_start,
				   PAGE_SIZE);
		WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
		/* The context is going to be destroyed, so the vm_ops
		 * must not be accessed any more.
		 */
		vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		vma->vm_ops = NULL;
		list_del(&vma_private->list);
		kfree(vma_private);
	}
	up_write(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	default:
		return NULL;
	}
}

static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;
	unsigned long idx;
	phys_addr_t pfn, pa;
	pgprot_t prot;
	int uars_per_page;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
	idx = get_index(vma->vm_pgoff);
	if (idx % uars_per_page ||
	    idx * uars_per_page >= bfregi->num_sys_pages) {
		mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
		return -EINVAL;
	}

	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
		if (!pat_enabled())
			return -EPERM;
#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
		return -EPERM;
#endif
	/* fall through */
	case MLX5_IB_MMAP_REGULAR_PAGE:
		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
		prot = pgprot_writecombine(vma->vm_page_prot);
		break;
	case MLX5_IB_MMAP_NC_PAGE:
		prot = pgprot_noncached(vma->vm_page_prot);
		break;
	default:
		return -EINVAL;
	}

	pfn = uar_index2pfn(dev, bfregi, idx);
	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

	vma->vm_page_prot = prot;
	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
				 PAGE_SIZE, vma->vm_page_prot);
	if (err) {
		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
		return -EAGAIN;
	}

	pa = pfn << PAGE_SHIFT;
	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
		    vma->vm_start, &pa);

	return mlx5_ib_set_vma_data(vma, context);
}
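
/*
 * Mapping-type note (informational): a WC (write-combining) mapping lets
 * the CPU merge adjacent doorbell/BlueFlame writes into one bus
 * transaction, which is the fast path for ringing doorbells.  NC
 * (non-cached) is the strict fallback, and MLX5_IB_MMAP_REGULAR_PAGE asks
 * for WC but quietly degrades where the architecture cannot guarantee it,
 * hence "best effort WC" in mmap_cmd2str() above.
 */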

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)				     \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;

	return match_criteria_enable;
}
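
/*
 * Worked example (informational): the three MATCH_CRITERIA_ENABLE bits
 * are 0, 1 and 2, so a rule matching only outer L2/L3 fields yields 0x1,
 * while a VXLAN rule that also matches the VNI (misc parameters) and the
 * inner headers yields 0x1 | 0x2 | 0x4 == 0x7.  Firmware uses this mask
 * to know which of the three header sets in fte_match_param participate
 * in matching at all.
 */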

static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}

static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc,
			 misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id

/* "field" is the last field in the spec that this driver supports */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
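
/*
 * Worked example (informational): for an IPv4 spec LAST_IPV4_FIELD is
 * "tos", so FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)
 * scans every mask byte *after* tos and returns non-NULL if any is set.
 * A userspace library newer than this driver may populate fields the
 * driver does not understand; rather than silently ignoring them (and
 * matching more traffic than the user asked for), parse_flow_attr()
 * below rejects such specs with -EOPNOTSUPP.
 */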

static int parse_flow_attr(u32 *match_c, u32 *match_v,
			   const union ib_flow_spec *ib_spec, u32 *tag_id)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *headers_c;
	void *headers_v;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -EOPNOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ETH_P_IP);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		set_proto(headers_c, headers_v,
			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ETH_P_IPV6);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		set_proto(headers_c, headers_v,
			  ib_spec->ipv6.mask.next_hdr,
			  ib_spec->ipv6.val.next_hdr);

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);

		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_TCP);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_UDP);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		*tag_id = ib_spec->flow_tag.tag_id;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* If a flow rule could match both multicast and unicast packets it must
 * not be placed in the multicast flow steering table: such a rule would
 * steal multicast packets from other rules there.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
	struct ib_flow_spec_eth *eth_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->size < sizeof(struct ib_flow_attr) +
	    sizeof(struct ib_flow_spec_eth) ||
	    ib_attr->num_of_specs < 1)
		return false;

	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
	    eth_spec->size != sizeof(*eth_spec))
		return false;

	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
	       is_multicast_ether_addr(eth_spec->val.dst_mac);
}

static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	bool has_ipv4_spec = false;
	bool eth_type_ipv4 = true;
	unsigned int spec_index;

	/* Validate that the ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if (ib_spec->type == IB_FLOW_SPEC_ETH &&
		    ib_spec->eth.mask.ether_type) {
			if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
			      ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
				eth_type_ipv4 = false;
		} else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
			has_ipv4_spec = true;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}
	return !has_ipv4_spec || eth_type_ipv4;
}
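
/*
 * Layout note (informational): an ib_flow_attr arrives from the uverbs
 * layer as one allocation with num_of_specs variable-sized specs packed
 * directly behind the header, e.g. for an ETH + IPV4 rule:
 *
 *	+---------------------+-------------------+--------------------+
 *	| struct ib_flow_attr | ib_flow_spec_eth  | ib_flow_spec_ipv4  |
 *	+---------------------+-------------------+--------------------+
 *
 * which is why is_valid_attr() above and create_flow_rule() below both
 * walk the specs with "ib_spec = (void *)ib_spec + ib_spec->size".
 */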

static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;

	mutex_lock(&dev->flow_db.lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mutex_unlock(&dev->flow_db.lock);

	kfree(handler);

	return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}
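
/*
 * Mapping rationale (informational): each IB priority gets two core
 * priorities so that a don't-trap rule can sit one level above its
 * trapping siblings.  E.g. IB priority 1 maps to core priority 2 with
 * IB_FLOW_ATTR_FLAGS_DONT_TRAP and to 3 without it; the don't-trap rule
 * is evaluated first and, since it forwards to the next priority, the
 * packet still reaches the normal rules below it.
 */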

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES	 6
#define MLX5_FS_MAX_ENTRIES	 BIT(16)
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int max_table_size;
	int num_entries;
	int num_groups;
	int priority;
	int err = 0;

	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						       log_max_ft_size));
	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_is_multicast_only(flow_attr) &&
		    !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_BYPASS);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		prio = &dev->flow_db.prios[priority];
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority,
					 &num_entries,
					 &num_groups);
		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-ENOTSUPP);

		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db.sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	if (num_entries > max_table_size)
		return ERR_PTR(-ENOMEM);

	ft = prio->flow_table;
	if (!ft) {
		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
							 num_entries,
							 num_groups,
							 0, 0);

		if (!IS_ERR(ft)) {
			prio->refcount = 0;
			prio->flow_table = ft;
		} else {
			err = PTR_ERR(ft);
		}
	}

	return err ? ERR_PTR(err) : prio;
}
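
/*
 * Lifetime note (informational): flow tables are created lazily here on
 * the first rule added to a priority, and put_flow_table() destroys the
 * table once the refcount of that priority drops back to zero, so idle
 * priorities consume no device resources.
 */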

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	int err = 0;

	if (!is_valid_attr(flow_attr))
		return ERR_PTR(-EINVAL);

	spec = mlx5_vzalloc(sizeof(*spec));
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(spec->match_criteria,
				      spec->match_value, ib_flow, &flow_tag);
		if (err < 0)
			goto free;

		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
	flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;

	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x are not allowed in leftovers\n",
			     flow_tag, flow_attr->type);
		err = -EINVAL;
		goto free;
	}
	flow_act.flow_tag = flow_tag;
	handler->rule = mlx5_add_flow_rules(ft, spec,
					    &flow_act,
					    dst, 1);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;

	ft_prio->flow_table = ft;
free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}

static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_dst = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
	if (!IS_ERR(handler)) {
		handler_dst = create_flow_rule(dev, ft_prio,
					       flow_attr, dst);
		if (IS_ERR(handler_dst)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_dst;
		} else {
			list_add(&handler_dst->list, &handler->list);
		}
	}

	return handler;
}

enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};

static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}
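
/*
 * Matching trick (informational): the only bit set in both leftover
 * masks is the least significant bit of the first destination-MAC byte,
 * i.e. the Ethernet group/multicast bit.  With val 0x1 the rule catches
 * all remaining multicast traffic, with val 0 all remaining unicast.
 */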

static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
							 struct mlx5_ib_flow_prio *ft_rx,
							 struct mlx5_ib_flow_prio *ft_tx,
							 struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_rx;
	struct mlx5_ib_flow_handler *handler_tx;
	int err;
	static const struct ib_flow_attr flow_attr = {
		.num_of_specs = 0,
		.size = sizeof(flow_attr)
	};

	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
	if (IS_ERR(handler_rx)) {
		err = PTR_ERR(handler_rx);
		goto err;
	}

	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
	if (IS_ERR(handler_tx)) {
		err = PTR_ERR(handler_tx);
		goto err_tx;
	}

	list_add(&handler_tx->list, &handler_rx->list);

	return handler_rx;

err_tx:
	mlx5_del_flow_rules(handler_rx->rule);
	ft_rx->refcount--;
	kfree(handler_rx);
err:
	return ERR_PTR(err);
}

static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	int err;

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOMEM);

	if (domain != IB_FLOW_DOMAIN_USER ||
	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
	    (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
		return ERR_PTR(-EINVAL);

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&dev->flow_db.lock);

	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}
	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
		if (IS_ERR(ft_prio_tx)) {
			err = PTR_ERR(ft_prio_tx);
			ft_prio_tx = NULL;
			goto destroy_ft;
		}
	}

	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	if (mqp->flags & MLX5_IB_QP_RSS)
		dst->tir_num = mqp->rss_qp.tirn;
	else
		dst->tir_num = mqp->raw_packet_qp.rq.tirn;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
			handler = create_dont_trap_rule(dev, ft_prio,
							flow_attr, dst);
		} else {
			handler = create_flow_rule(dev, ft_prio, flow_attr,
						   dst);
		}
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
						dst);
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
	} else {
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
	if (ft_prio_tx)
		put_flow_table(dev, ft_prio_tx, false);
unlock:
	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);
	kfree(handler);
	return ERR_PTR(err);
}
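
/*
 * Userspace view (illustrative sketch, not part of this file): the entry
 * point above is reached through uverbs via ibv_create_flow(), roughly:
 *
 *	struct raw_eth_flow_attr {
 *		struct ibv_flow_attr     attr;
 *		struct ibv_flow_spec_eth spec_eth;
 *	} __attribute__((packed)) fa = {
 *		.attr = {
 *			.type         = IBV_FLOW_ATTR_NORMAL,
 *			.size         = sizeof(fa),
 *			.num_of_specs = 1,
 *			.port         = 1,
 *		},
 *		.spec_eth = {
 *			.type = IBV_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ibv_flow_spec_eth),
 *			.val.dst_mac  = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
 *			.mask.dst_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *		},
 *	};
 *	struct ibv_flow *flow = ibv_create_flow(qp, &fa.attr);
 *
 * The MAC address is a made-up example value.
 */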
2398
Eli Cohene126ba92013-07-07 17:25:49 +03002399static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2400{
2401 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2402 int err;
2403
Jack Morgenstein9603b612014-07-28 23:30:22 +03002404 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
Eli Cohene126ba92013-07-07 17:25:49 +03002405 if (err)
2406 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2407 ibqp->qp_num, gid->raw);
2408
2409 return err;
2410}
2411
2412static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2413{
2414 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2415 int err;
2416
Jack Morgenstein9603b612014-07-28 23:30:22 +03002417 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
Eli Cohene126ba92013-07-07 17:25:49 +03002418 if (err)
2419 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2420 ibqp->qp_num, gid->raw);
2421
2422 return err;
2423}
2424
2425static int init_node_data(struct mlx5_ib_dev *dev)
2426{
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002427 int err;
Eli Cohene126ba92013-07-07 17:25:49 +03002428
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002429 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
Eli Cohene126ba92013-07-07 17:25:49 +03002430 if (err)
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002431 return err;
Eli Cohene126ba92013-07-07 17:25:49 +03002432
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002433 dev->mdev->rev_id = dev->mdev->pdev->revision;
Eli Cohene126ba92013-07-07 17:25:49 +03002434
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03002435 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
Eli Cohene126ba92013-07-07 17:25:49 +03002436}
2437
2438static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
2439 char *buf)
2440{
2441 struct mlx5_ib_dev *dev =
2442 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2443
Jack Morgenstein9603b612014-07-28 23:30:22 +03002444 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
Eli Cohene126ba92013-07-07 17:25:49 +03002445}
2446
2447static ssize_t show_reg_pages(struct device *device,
2448 struct device_attribute *attr, char *buf)
2449{
2450 struct mlx5_ib_dev *dev =
2451 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2452
Haggai Eran6aec21f2014-12-11 17:04:23 +02002453 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
Eli Cohene126ba92013-07-07 17:25:49 +03002454}
2455
2456static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2457 char *buf)
2458{
2459 struct mlx5_ib_dev *dev =
2460 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
Jack Morgenstein9603b612014-07-28 23:30:22 +03002461 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
Eli Cohene126ba92013-07-07 17:25:49 +03002462}
2463
Eli Cohene126ba92013-07-07 17:25:49 +03002464static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2465 char *buf)
2466{
2467 struct mlx5_ib_dev *dev =
2468 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
Jack Morgenstein9603b612014-07-28 23:30:22 +03002469 return sprintf(buf, "%x\n", dev->mdev->rev_id);
Eli Cohene126ba92013-07-07 17:25:49 +03002470}
2471
2472static ssize_t show_board(struct device *device, struct device_attribute *attr,
2473 char *buf)
2474{
2475 struct mlx5_ib_dev *dev =
2476 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2477 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
Jack Morgenstein9603b612014-07-28 23:30:22 +03002478 dev->mdev->board_id);
Eli Cohene126ba92013-07-07 17:25:49 +03002479}
2480
2481static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
Eli Cohene126ba92013-07-07 17:25:49 +03002482static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2483static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
2484static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
2485static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
2486
2487static struct device_attribute *mlx5_class_attributes[] = {
2488 &dev_attr_hw_rev,
Eli Cohene126ba92013-07-07 17:25:49 +03002489 &dev_attr_hca_type,
2490 &dev_attr_board_id,
2491 &dev_attr_fw_pages,
2492 &dev_attr_reg_pages,
2493};
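
/*
 * Usage sketch (illustrative; device name and values assumed): the
 * attributes above appear under the IB class device, e.g.
 *
 *	$ cat /sys/class/infiniband/mlx5_0/hca_type
 *	MT4115
 *	$ cat /sys/class/infiniband/mlx5_0/fw_pages
 *	12345
 */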

static void pkey_change_handler(struct work_struct *work)
{
	struct mlx5_ib_port_resources *ports =
		container_of(work, struct mlx5_ib_port_resources,
			     pkey_change_work);

	mutex_lock(&ports->devr->mutex);
	mlx5_ib_gsi_pkey_change(ports->gsi);
	mutex_unlock(&ports->devr->mutex);
}

static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_ib_qp *mqp;
	struct mlx5_ib_cq *send_mcq, *recv_mcq;
	struct mlx5_core_cq *mcq;
	struct list_head cq_armed_list;
	unsigned long flags_qp;
	unsigned long flags_cq;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_armed_list);

	/* Go over the qp list residing on that ibdev, syncing with qp
	 * create/destroy.
	 */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_armed_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQs */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_armed_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	/* At this point all inflight post-send operations are guaranteed
	 * to have been observed, since we took and released the locks
	 * above.  Now arm all the CQs involved so their owners see the
	 * flush.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	bool fatal = false;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx5_ib_handle_internal_error(ibdev);
		fatal = true;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		port = (u8)param;

		/* In RoCE, port up/down events are handled in
		 * mlx5_netdev_event().
		 */
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			return;

		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;

		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	default:
		return;
	}

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
}

static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		dev->mdev->port_caps[port - 1].has_smi = false;
		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
		    MLX5_CAP_PORT_TYPE_IB) {
			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
				err = mlx5_query_hca_vport_context(dev->mdev, 0,
								   port, 0,
								   &vport_ctx);
				if (err) {
					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
						    port, err);
					return err;
				}
				dev->mdev->port_caps[port - 1].has_smi =
					vport_ctx.has_smi;
			} else {
				dev->mdev->port_caps[port - 1].has_smi = true;
			}
		}
	}
	return 0;
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = set_has_smi_cap(dev);
	if (err)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		memset(pprops, 0, sizeof(*pprops));
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
			dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
			pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}

static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_free_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};

static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_free_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
2844
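/*
 * Allocate the device-global verbs objects (PD, CQ, two XRC domains and
 * two SRQs) that mlx5_ib itself relies on.  The common ib_* fields are
 * filled in by hand because the driver entry points are called directly
 * here, bypassing the IB core wrappers that normally initialize them.
 */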
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	mutex_init(&devr->mutex);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device = &dev->ib_dev;
	devr->s1->pd = devr->p0;
	devr->s1->uobject = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context = NULL;
	devr->s1->srq_type = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);
		devr->ports[port].devr = devr;
	}

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

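/*
 * Release everything created by create_dev_resources() and make sure no
 * P_Key change work item is still running before the workqueue entries
 * go away.
 */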
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	struct mlx5_ib_dev *dev =
		container_of(devr, struct mlx5_ib_dev, devr);
	int port;

	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);

	/* Make sure no change P_Key work items are still executing */
	for (port = 0; port < dev->num_ports; ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);
}

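/*
 * Translate the device's link layer and RoCE capabilities into RDMA core
 * port capability flags.  RoCE v1/v2 flags are only advertised when the
 * HCA supports both IPv4 and IPv6 as L3 types; otherwise the port is
 * reported as raw packet only.
 */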
static u32 get_core_cap_flags(struct ib_device *ibdev)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	u32 ret = 0;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return RDMA_CORE_PORT_IBA_IB;

	ret = RDMA_CORE_PORT_RAW_PACKET;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return ret;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return ret;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

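/*
 * Note: core_cap_flags is set before ib_query_port() because the query
 * path may consult the port's protocol through these immutable flags;
 * the assignment is then repeated together with the other fields.
 */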
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
	int err;

	immutable->core_cap_flags = get_core_cap_flags(ibdev);

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev);
	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_dev_fw_str(struct ib_device *ibdev, char *str,
			   size_t str_len)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);

	snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

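/*
 * Set up vport LAG and the demux flow table when the mlx5 core reports
 * an active LAG (bonded ports) configuration; a no-op otherwise.
 */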
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
								 MLX5_FLOW_NAMESPACE_LAG);
	struct mlx5_flow_table *ft;
	int err;

	if (!ns || !mlx5_lag_is_active(mdev))
		return 0;

	err = mlx5_cmd_create_vport_lag(mdev);
	if (err)
		return err;

	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_destroy_vport_lag;
	}

	dev->flow_db.lag_demux_ft = ft;
	return 0;

err_destroy_vport_lag:
	mlx5_cmd_destroy_vport_lag(mdev);
	return err;
}

static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (dev->flow_db.lag_demux_ft) {
		mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
		dev->flow_db.lag_demux_ft = NULL;

		mlx5_cmd_destroy_vport_lag(mdev);
	}
}

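/*
 * Register/unregister the netdev notifier (mlx5_netdev_event) that tracks
 * the state of the underlying Ethernet device; notifier_call doubles as
 * the "registered" flag so removal is safe to call on a failed setup.
 */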
static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
{
	int err;

	dev->roce.nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->roce.nb);
	if (err) {
		dev->roce.nb.notifier_call = NULL;
		return err;
	}

	return 0;
}

static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
{
	if (dev->roce.nb.notifier_call) {
		unregister_netdevice_notifier(&dev->roce.nb);
		dev->roce.nb.notifier_call = NULL;
	}
}

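/*
 * Enable the Ethernet/RoCE side of the device: netdev notifier first,
 * then RoCE on the NIC vport (when the HCA has the roce capability), and
 * finally the LAG flow table.  Unwound in reverse order on failure.
 */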
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_add_netdev_notifier(dev);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev->mdev, roce)) {
		err = mlx5_nic_vport_enable_roce(dev->mdev);
		if (err)
			goto err_unregister_netdevice_notifier;
	}

	err = mlx5_eth_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	if (MLX5_CAP_GEN(dev->mdev, roce))
		mlx5_nic_vport_disable_roce(dev->mdev);

err_unregister_netdevice_notifier:
	mlx5_remove_netdev_notifier(dev);
	return err;
}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
	mlx5_eth_lag_cleanup(dev);
	if (MLX5_CAP_GEN(dev->mdev, roce))
		mlx5_nic_vport_disable_roce(dev->mdev);
}

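/*
 * Per-port Q (queue) counters exposed through the rdma_hw_stats
 * interface.  Each entry pairs a counter name with its byte offset in
 * the QUERY_Q_COUNTER output layout, e.g. INIT_Q_COUNTER(out_of_buffer)
 * expands to:
 *
 *	{ .name = "out_of_buffer",
 *	  .offset = MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer) }
 *
 * The out-of-sequence and retransmission groups are only used when the
 * firmware advertises the corresponding capability bits.
 */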
struct mlx5_ib_q_counter {
	const char *name;
	size_t offset;
};

#define INIT_Q_COUNTER(_name) \
	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}

static const struct mlx5_ib_q_counter basic_q_cnts[] = {
	INIT_Q_COUNTER(rx_write_requests),
	INIT_Q_COUNTER(rx_read_requests),
	INIT_Q_COUNTER(rx_atomic_requests),
	INIT_Q_COUNTER(out_of_buffer),
};

static const struct mlx5_ib_q_counter out_of_seq_q_cnts[] = {
	INIT_Q_COUNTER(out_of_sequence),
};

static const struct mlx5_ib_q_counter retrans_q_cnts[] = {
	INIT_Q_COUNTER(duplicate_request),
	INIT_Q_COUNTER(rnr_nak_retry_err),
	INIT_Q_COUNTER(packet_seq_err),
	INIT_Q_COUNTER(implied_nak_seq_err),
	INIT_Q_COUNTER(local_ack_timeout_err),
};

static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_ports; i++) {
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].q_cnts.set_id);
		kfree(dev->port[i].q_cnts.names);
		kfree(dev->port[i].q_cnts.offsets);
	}
}

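/*
 * Allocate the name/offset arrays for one port, sized according to which
 * optional counter groups the firmware supports.
 */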
static int __mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev,
				      const char ***names,
				      size_t **offsets,
				      u32 *num)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(basic_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
		num_counters += ARRAY_SIZE(retrans_q_cnts);

	*names = kcalloc(num_counters, sizeof(**names), GFP_KERNEL);
	if (!*names)
		return -ENOMEM;

	*offsets = kcalloc(num_counters, sizeof(**offsets), GFP_KERNEL);
	if (!*offsets)
		goto err_names;

	*num = num_counters;

	return 0;

err_names:
	kfree(*names);
	return -ENOMEM;
}

static void mlx5_ib_fill_q_counters(struct mlx5_ib_dev *dev,
				    const char **names,
				    size_t *offsets)
{
	int i;
	int j = 0;

	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
		names[j] = basic_q_cnts[i].name;
		offsets[j] = basic_q_cnts[i].offset;
	}

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
			names[j] = out_of_seq_q_cnts[i].name;
			offsets[j] = out_of_seq_q_cnts[i].offset;
		}
	}

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
			names[j] = retrans_q_cnts[i].name;
			offsets[j] = retrans_q_cnts[i].offset;
		}
	}
}

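/*
 * For every port: allocate a Q counter set id in firmware, then build
 * and fill the matching name/offset arrays.
 */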
static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
{
	int i;
	int ret;

	for (i = 0; i < dev->num_ports; i++) {
		struct mlx5_ib_port *port = &dev->port[i];

		ret = mlx5_core_alloc_q_counter(dev->mdev,
						&port->q_cnts.set_id);
		if (ret) {
			mlx5_ib_warn(dev,
				     "couldn't allocate queue counter for port %d, err %d\n",
				     i + 1, ret);
			goto dealloc_counters;
		}

		ret = __mlx5_ib_alloc_q_counters(dev,
						 &port->q_cnts.names,
						 &port->q_cnts.offsets,
						 &port->q_cnts.num_counters);
		if (ret) {
			/* the unwind loop below only covers ports < i */
			mlx5_core_dealloc_q_counter(dev->mdev,
						    port->q_cnts.set_id);
			goto dealloc_counters;
		}

		mlx5_ib_fill_q_counters(dev, port->q_cnts.names,
					port->q_cnts.offsets);
	}

	return 0;

dealloc_counters:
	while (--i >= 0)
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].q_cnts.set_id);

	return ret;
}

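/*
 * rdma_hw_stats hooks: alloc_hw_stats describes the per-port counter set
 * to the IB core, get_hw_stats reads the current values with a
 * QUERY_Q_COUNTER command and converts them from big endian.
 */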
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_port *port;

	/* We support only per port stats */
	if (port_num == 0)
		return NULL;

	port = &dev->port[port_num - 1];

	return rdma_alloc_hw_stats_struct(port->q_cnts.names,
					  port->q_cnts.num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port_num, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_port *port = &dev->port[port_num - 1];
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	__be32 val;
	int ret;
	int i;

	if (!stats)
		return -ENOSYS;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_q_counter(dev->mdev,
					port->q_cnts.set_id, 0,
					out, outlen);
	if (ret)
		goto free;

	for (i = 0; i < port->q_cnts.num_counters; i++) {
		val = *(__be32 *)(out + port->q_cnts.offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}

	/* on success report how many counters were filled in */
	ret = port->q_cnts.num_counters;

free:
	kvfree(out);
	return ret;
}

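/*
 * Main probe entry point, called by mlx5_core for each new function.
 * Builds the ib_device, wires up the verbs entry points (conditionally
 * on device capabilities and link layer), then brings up RoCE, device
 * resources, ODP, Q counters, UAR pages and blue-flame registers, and
 * finally registers the device, creates the UMR resources and the sysfs
 * attributes.
 */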
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	const char *name;
	int err;
	int i;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port)
		goto err_dealloc;

	rwlock_init(&dev->roce.netdev_lock);
	err = get_port_caps(dev);
	if (err)
		goto err_free_port;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	if (!mlx5_lag_is_active(mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";

	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dev.parent = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);

	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
	if (ll == IB_LINK_LAYER_ETHERNET)
		dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.add_gid = mlx5_ib_add_gid;
	dev->ib_dev.del_gid = mlx5_ib_del_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
	}

	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.create_flow = mlx5_ib_create_flow;
		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.create_wq = mlx5_ib_create_wq;
		dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	}
	err = init_node_data(dev);
	if (err)
		goto err_free_port;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_eth(dev);
		if (err)
			goto err_free_port;
	}

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_eth;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		err = mlx5_ib_alloc_q_counters(dev);
		if (err)
			goto err_odp;
	}

	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	if (!dev->mdev->priv.uar)
		goto err_q_cnt;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		goto err_uar_page;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		goto err_bfreg;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_fp_bfreg;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_fp_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);

err_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);

err_uar_page:
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);

err_q_cnt:
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_q_counters(dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_disable_eth:
	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_remove_netdev_notifier(dev);
	}

err_free_port:
	kfree(dev->port);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}

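/*
 * Teardown counterpart of mlx5_ib_add(); releases resources roughly in
 * reverse order of creation.
 */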
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

	mlx5_remove_netdev_notifier(dev);
	ib_unregister_device(&dev->ib_dev);
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
	mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_q_counters(dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_eth(dev);
	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add = mlx5_ib_add,
	.remove = mlx5_ib_remove,
	.event = mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	.pfault = mlx5_ib_pfault,
#endif
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init mlx5_ib_init(void)
{
	int err;

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);

	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);