/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

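/*
 * Decide whether device-managed flow steering (DMFS) can be advertised.
 * DMFS requires the device-managed steering mode plus the FS_EN capability
 * for any Ethernet ports and the DMFS_IPOIB capability for any IB ports,
 * and it is disabled for IB ports in a multifunction (SR-IOV) environment.
 */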
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

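/*
 * Return the net_device backing a RoCE port.  When the two ports are
 * bonded, the currently active slave of the bond master is returned
 * instead.  The returned device is held; the caller must dev_put() it.
 */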
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

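/*
 * Push the whole software GID table of a port to firmware in the RoCE v1
 * layout (a flat array of union ib_gid) using SET_PORT.  When the ports
 * are bonded the same table is also written to port 2.
 */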
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

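/*
 * Same as mlx4_ib_update_gids_v1(), but for firmware that understands the
 * RoCE v1/v2 table layout: each entry carries a version field and, for
 * RoCE v2 entries, a type bit distinguishing IPv4-mapped from IPv6 GIDs.
 */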
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

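/*
 * ib_core add_gid() callback for RoCE ports.  Under iboe->lock, either
 * bump the refcount of an existing identical (GID, type) entry or claim a
 * free slot in the per-port software table; only a newly claimed slot
 * triggers a firmware update through mlx4_ib_update_gids().
 */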
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

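/*
 * ib_core del_gid() callback: drop one reference on the cached entry and,
 * once the last reference goes away, zero the slot and write the updated
 * table to firmware outside the spinlock.
 */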
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

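/*
 * Translate a GID index from the ib_core cache into the slot the driver
 * actually programmed for that port.  For IB ports the index is used
 * as-is; for RoCE ports the cached GID and type are looked up in the
 * per-port table.  Returns the hardware index or a negative errno.
 */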
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

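/*
 * ib_core query_device() callback.  Device attributes come from a
 * NodeInfo MAD plus the cached mlx4 capability and quota fields.  The
 * optional uverbs extension (uhw) additionally reports the HCA core
 * clock offset, maximum inline receive size and RSS capabilities when
 * the consumer's response buffer is large enough to hold them.
 */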
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if ((dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
	    (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	     mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET)) {
		props->rss_caps.max_rwq_indirection_tables = props->max_qp;
		props->rss_caps.max_rwq_indirection_table_size =
			dev->dev->caps.max_rss_tbl_sz;
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq = props->max_qp;
	}

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (uhw->outlen >= resp.response_length + sizeof(resp.rss_caps)) {
		resp.response_length += sizeof(resp.rss_caps);
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

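/*
 * Query port attributes for an InfiniBand link by issuing PortInfo (and,
 * for extended speeds, ExtendedPortInfo) MADs.  netw_view selects the
 * network view of the subnet when running as a multifunction slave.
 */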
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is really FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

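/*
 * Query port attributes for a RoCE (Ethernet) link: speed and width come
 * from QUERY_PORT, while state and MTU are derived from the underlying
 * net_device (or the bond master when the two ports are bonded).
 */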
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
			      (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
			      IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
			      IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

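/*
 * Read the SL-to-VL mapping of a port via an SLtoVLMappingTable MAD and
 * pack the eight entries into a single u64.  Slaves get an all-zero
 * table since they have no direct SMP access to the mapping.
 */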
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

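/*
 * Program IB port capabilities (and optionally reset the QKey violation
 * counter) with the SET_PORT command, using the older mailbox layout on
 * devices flagged with MLX4_FLAG_OLD_PORT_CMDS.
 */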
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE.  CM calls ib_modify_port() regardless
	 * of whether the port link layer is ETH or IB.  For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either the mremap flow or split_vma (usually
	 * due to mlock, madvise, munmap, etc.).  We do not support a clone of
	 * the vma, as this VMA is strongly hardware related.  Therefore we
	 * set the vm_ops of the newly created/cloned VMA to NULL, to prevent
	 * it from calling us again and trying to do incorrect actions.  We
	 * assume that the original vma is exactly a single page, so there
	 * will be no "splitting" operations on it.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow (e.g. mlx4_ib_dealloc_ucontext).  However, a sync is
	 * needed with accessing the vma as part of
	 * mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except
	 * when the process is exiting.  The exiting case is handled
	 * explicitly as part of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in
	 * mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that the task is dead before returning; it
			 * may prevent a rare case of module unload in parallel
			 * to a call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case the task was dead, release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_write(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		context->hw_bar_info[i].vma->vm_flags &=
			~(VM_SHARED | VM_MAYSHARE);
		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_write(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

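/*
 * Map one of the per-context hardware BARs into user space: page offset 0
 * is the UAR doorbell page, offset 1 the BlueFlame page (when BlueFlame is
 * supported) and offset 3 the internal clock page.  Each BAR may only be
 * mapped once per context.
 */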
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

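/*
 * Allocate an XRC domain.  Besides the firmware xrcdn, each XRCD keeps an
 * internal PD and a single-entry CQ that are used when XRC target QPs are
 * created on the domain.
 */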
Sean Hefty012a8ff2011-06-02 09:01:33 -07001349static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
1350 struct ib_ucontext *context,
1351 struct ib_udata *udata)
1352{
1353 struct mlx4_ib_xrcd *xrcd;
Matan Barak8e372102015-06-11 16:35:21 +03001354 struct ib_cq_init_attr cq_attr = {};
Sean Hefty012a8ff2011-06-02 09:01:33 -07001355 int err;
1356
1357 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1358 return ERR_PTR(-ENOSYS);
1359
1360 xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
1361 if (!xrcd)
1362 return ERR_PTR(-ENOMEM);
1363
1364 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
1365 if (err)
1366 goto err1;
1367
Christoph Hellwiged082d32016-09-05 12:56:17 +02001368 xrcd->pd = ib_alloc_pd(ibdev, 0);
Sean Hefty012a8ff2011-06-02 09:01:33 -07001369 if (IS_ERR(xrcd->pd)) {
1370 err = PTR_ERR(xrcd->pd);
1371 goto err2;
1372 }
1373
Matan Barak8e372102015-06-11 16:35:21 +03001374 cq_attr.cqe = 1;
1375 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
Sean Hefty012a8ff2011-06-02 09:01:33 -07001376 if (IS_ERR(xrcd->cq)) {
1377 err = PTR_ERR(xrcd->cq);
1378 goto err3;
1379 }
1380
1381 return &xrcd->ibxrcd;
1382
1383err3:
1384 ib_dealloc_pd(xrcd->pd);
1385err2:
1386 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
1387err1:
1388 kfree(xrcd);
1389 return ERR_PTR(err);
1390}
1391
1392static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1393{
1394 ib_destroy_cq(to_mxrcd(xrcd)->cq);
1395 ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1396 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1397 kfree(xrcd);
1398
1399 return 0;
1400}
1401
Eli Cohenfa417f72010-10-24 21:08:52 -07001402static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1403{
1404 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1405 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1406 struct mlx4_ib_gid_entry *ge;
1407
1408 ge = kzalloc(sizeof *ge, GFP_KERNEL);
1409 if (!ge)
1410 return -ENOMEM;
1411
1412 ge->gid = *gid;
1413 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1414 ge->port = mqp->port;
1415 ge->added = 1;
1416 }
1417
1418 mutex_lock(&mqp->mutex);
1419 list_add_tail(&ge->list, &mqp->gid_list);
1420 mutex_unlock(&mqp->mutex);
1421
1422 return 0;
1423}
1424
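/*
 * Tear down one per-port counters table: free every counter the driver
 * allocated itself and empty the list under the table mutex.
 */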
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03001425static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1426 struct mlx4_ib_counters *ctr_table)
1427{
1428 struct counter_index *counter, *tmp_count;
1429
1430 mutex_lock(&ctr_table->mutex);
1431 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1432 list) {
1433 if (counter->allocated)
1434 mlx4_counter_free(ibdev->dev, counter->index);
1435 list_del(&counter->list);
1436 kfree(counter);
1437 }
1438 mutex_unlock(&ctr_table->mutex);
1439}
1440
Eli Cohenfa417f72010-10-24 21:08:52 -07001441int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1442 union ib_gid *gid)
1443{
Eli Cohenfa417f72010-10-24 21:08:52 -07001444 struct net_device *ndev;
1445 int ret = 0;
1446
1447 if (!mqp->port)
1448 return 0;
1449
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001450 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001451 ndev = mdev->iboe.netdevs[mqp->port - 1];
1452 if (ndev)
1453 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001454 spin_unlock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001455
1456 if (ndev) {
Eli Cohenfa417f72010-10-24 21:08:52 -07001457 ret = 1;
Eli Cohenfa417f72010-10-24 21:08:52 -07001458 dev_put(ndev);
1459 }
1460
1461 return ret;
1462}
1463
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001464struct mlx4_ib_steering {
1465 struct list_head list;
Moni Shoua146d6e12015-02-03 16:48:38 +02001466 struct mlx4_flow_reg_id reg_id;
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001467 union ib_gid gid;
1468};
1469
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001470#define LAST_ETH_FIELD vlan_tag
1471#define LAST_IB_FIELD sl
1472#define LAST_IPV4_FIELD dst_ip
1473#define LAST_TCP_UDP_FIELD src_port
1474
1475/* Field is the last supported field */
1476#define FIELDS_NOT_SUPPORTED(filter, field)\
1477 memchr_inv((void *)&filter.field +\
1478 sizeof(filter.field), 0,\
1479 sizeof(filter) -\
1480 offsetof(typeof(filter), field) -\
1481 sizeof(filter.field))
1482
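/*
 * Translate a single ib_flow_spec into the firmware _rule_hw layout.
 * Returns the number of bytes consumed in the rule buffer, or a negative
 * errno for unsupported types or masked fields beyond the supported set.
 */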
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001483static int parse_flow_attr(struct mlx4_dev *dev,
Matan Baraka37a1a42013-11-07 15:25:16 +02001484 u32 qp_num,
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001485 union ib_flow_spec *ib_spec,
1486 struct _rule_hw *mlx4_spec)
1487{
1488 enum mlx4_net_trans_rule_id type;
1489
1490 switch (ib_spec->type) {
1491 case IB_FLOW_SPEC_ETH:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001492 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1493 return -ENOTSUPP;
1494
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001495 type = MLX4_NET_TRANS_RULE_ID_ETH;
1496 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1497 ETH_ALEN);
1498 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1499 ETH_ALEN);
1500 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1501 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1502 break;
Matan Baraka37a1a42013-11-07 15:25:16 +02001503 case IB_FLOW_SPEC_IB:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001504 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1505 return -ENOTSUPP;
1506
Matan Baraka37a1a42013-11-07 15:25:16 +02001507 type = MLX4_NET_TRANS_RULE_ID_IB;
1508 mlx4_spec->ib.l3_qpn =
1509 cpu_to_be32(qp_num);
1510 mlx4_spec->ib.qpn_mask =
1511 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1512 break;
1513
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001514
1515 case IB_FLOW_SPEC_IPV4:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001516 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1517 return -ENOTSUPP;
1518
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001519 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1520 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1521 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1522 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1523 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1524 break;
1525
1526 case IB_FLOW_SPEC_TCP:
1527 case IB_FLOW_SPEC_UDP:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001528 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1529 return -ENOTSUPP;
1530
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001531 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1532 MLX4_NET_TRANS_RULE_ID_TCP :
1533 MLX4_NET_TRANS_RULE_ID_UDP;
1534 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1535 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1536 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1537 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1538 break;
1539
1540 default:
1541 return -EINVAL;
1542 }
1543 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1544 mlx4_hw_rule_sz(dev, type) < 0)
1545 return -EINVAL;
1546 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1547 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1548 return mlx4_hw_rule_sz(dev, type);
1549}
1550
Matan Baraka37a1a42013-11-07 15:25:16 +02001551struct default_rules {
1552 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1553 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1554 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1555 __u8 link_layer;
1556};
1557static const struct default_rules default_table[] = {
1558 {
1559 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1560 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1561 .rules_create_list = {IB_FLOW_SPEC_IB},
1562 .link_layer = IB_LINK_LAYER_INFINIBAND
1563 }
1564};
1565
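/*
 * Match a flow attribute against default_table: the link layer must match,
 * each mandatory spec layer must appear with the expected type, and none of
 * the mandatory_not types may be present. Returns the matching table index,
 * or -1 if no entry matches.
 */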
1566static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1567 struct ib_flow_attr *flow_attr)
1568{
1569 int i, j, k;
1570 void *ib_flow;
1571 const struct default_rules *pdefault_rules = default_table;
1572 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1573
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001574 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001575 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1576 memset(&field_types, 0, sizeof(field_types));
1577
1578 if (link_layer != pdefault_rules->link_layer)
1579 continue;
1580
1581 ib_flow = flow_attr + 1;
1582 /* we assume the specs are sorted */
1583 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1584 j < flow_attr->num_of_specs; k++) {
1585 union ib_flow_spec *current_flow =
1586 (union ib_flow_spec *)ib_flow;
1587
1588 /* same layer but different type */
1589 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1590 (pdefault_rules->mandatory_fields[k] &
1591 IB_FLOW_SPEC_LAYER_MASK)) &&
1592 (current_flow->type !=
1593 pdefault_rules->mandatory_fields[k]))
1594 goto out;
1595
1596			/* same layer, try to match the next one */
1597 if (current_flow->type ==
1598 pdefault_rules->mandatory_fields[k]) {
1599 j++;
1600 ib_flow +=
1601 ((union ib_flow_spec *)ib_flow)->size;
1602 }
1603 }
1604
1605 ib_flow = flow_attr + 1;
1606 for (j = 0; j < flow_attr->num_of_specs;
1607 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1608 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1609 /* same layer and same type */
1610 if (((union ib_flow_spec *)ib_flow)->type ==
1611 pdefault_rules->mandatory_not_fields[k])
1612 goto out;
1613
1614 return i;
1615 }
1616out:
1617 return -1;
1618}
1619
1620static int __mlx4_ib_create_default_rules(
1621 struct mlx4_ib_dev *mdev,
1622 struct ib_qp *qp,
1623 const struct default_rules *pdefault_rules,
1624 struct _rule_hw *mlx4_spec) {
1625 int size = 0;
1626 int i;
1627
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001628 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001629 int ret;
1630 union ib_flow_spec ib_spec;
1631 switch (pdefault_rules->rules_create_list[i]) {
1632 case 0:
1633 /* no rule */
1634 continue;
1635 case IB_FLOW_SPEC_IB:
1636 ib_spec.type = IB_FLOW_SPEC_IB;
1637 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1638
1639 break;
1640 default:
1641 /* invalid rule */
1642 return -EINVAL;
1643 }
1644		/* We must put an empty rule; the qpn is ignored */
1645 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1646 mlx4_spec);
1647 if (ret < 0) {
1648 pr_info("invalid parsing\n");
1649 return -EINVAL;
1650 }
1651
1652 mlx4_spec = (void *)mlx4_spec + ret;
1653 size += ret;
1654 }
1655 return size;
1656}
1657
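/*
 * Build one steering rule in a command mailbox (control segment, any
 * matching default rules, then the caller's specs) and attach it with
 * MLX4_QP_FLOW_STEERING_ATTACH; the firmware handle is returned through
 * *reg_id for a later detach.
 */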
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001658static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1659 int domain,
1660 enum mlx4_net_trans_promisc_mode flow_type,
1661 u64 *reg_id)
1662{
1663 int ret, i;
1664 int size = 0;
1665 void *ib_flow;
1666 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1667 struct mlx4_cmd_mailbox *mailbox;
1668 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
Matan Baraka37a1a42013-11-07 15:25:16 +02001669 int default_flow;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001670
1671 static const u16 __mlx4_domain[] = {
1672 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1673 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1674 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1675 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1676 };
1677
1678 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1679 pr_err("Invalid priority value %d\n", flow_attr->priority);
1680 return -EINVAL;
1681 }
1682
1683 if (domain >= IB_FLOW_DOMAIN_NUM) {
1684 pr_err("Invalid domain value %d\n", domain);
1685 return -EINVAL;
1686 }
1687
1688 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1689 return -EINVAL;
1690
1691 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1692 if (IS_ERR(mailbox))
1693 return PTR_ERR(mailbox);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001694 ctrl = mailbox->buf;
1695
1696 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1697 flow_attr->priority);
1698 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1699 ctrl->port = flow_attr->port;
1700 ctrl->qpn = cpu_to_be32(qp->qp_num);
1701
1702 ib_flow = flow_attr + 1;
1703 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
Matan Baraka37a1a42013-11-07 15:25:16 +02001704 /* Add default flows */
1705 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1706 if (default_flow >= 0) {
1707 ret = __mlx4_ib_create_default_rules(
1708 mdev, qp, default_table + default_flow,
1709 mailbox->buf + size);
1710 if (ret < 0) {
1711 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1712 return -EINVAL;
1713 }
1714 size += ret;
1715 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001716 for (i = 0; i < flow_attr->num_of_specs; i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001717 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1718 mailbox->buf + size);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001719 if (ret < 0) {
1720 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1721 return -EINVAL;
1722 }
1723 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1724 size += ret;
1725 }
1726
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001727 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1728 flow_attr->num_of_specs == 1) {
1729 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1730 enum ib_flow_spec_type header_spec =
1731 ((union ib_flow_spec *)(flow_attr + 1))->type;
1732
1733 if (header_spec == IB_FLOW_SPEC_ETH)
1734 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1735 }
1736
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001737 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1738 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001739 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001740 if (ret == -ENOMEM)
1741 pr_err("mcg table is full. Fail to register network rule.\n");
1742 else if (ret == -ENXIO)
1743 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1744 else if (ret)
Colin Ian King35fc7b72016-04-25 20:26:50 +01001745 pr_err("Invalid argument. Fail to register network rule.\n");
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001746
1747 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1748 return ret;
1749}
1750
1751static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1752{
1753 int err;
1754 err = mlx4_cmd(dev, reg_id, 0, 0,
1755 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001756 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001757 if (err)
1758 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1759 reg_id);
1760 return err;
1761}
1762
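/*
 * When VXLAN offload is enabled (and steering is not A0 static), a flow
 * with a single ETH spec also gets a tunnel steering entry keyed on the
 * destination MAC for the same QP. Otherwise this is a no-op.
 */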
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001763static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1764 u64 *reg_id)
1765{
1766 void *ib_flow;
1767 union ib_flow_spec *ib_spec;
1768 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1769 int err = 0;
1770
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001771 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1772 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001773 return 0; /* do nothing */
1774
1775 ib_flow = flow_attr + 1;
1776 ib_spec = (union ib_flow_spec *)ib_flow;
1777
1778 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1779 return 0; /* do nothing */
1780
1781 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1782 flow_attr->port, qp->qp_num,
1783 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1784 reg_id);
1785 return err;
1786}
1787
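/*
 * Map an IB_FLOW_ATTR_FLAGS_DONT_TRAP rule onto the MC/UC sniffer promisc
 * modes. With no specs both sniffers are used; with a single ETH spec the
 * destination MAC mask may select at most the multicast bit, and the value
 * then decides between MC and UC sniffing.
 */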
Marina Varshaver0e451e82016-02-18 18:31:06 +02001788static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1789 struct ib_flow_attr *flow_attr,
1790 enum mlx4_net_trans_promisc_mode *type)
1791{
1792 int err = 0;
1793
1794 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1795 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1796 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1797 return -EOPNOTSUPP;
1798 }
1799
1800 if (flow_attr->num_of_specs == 0) {
1801 type[0] = MLX4_FS_MC_SNIFFER;
1802 type[1] = MLX4_FS_UC_SNIFFER;
1803 } else {
1804 union ib_flow_spec *ib_spec;
1805
1806 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1807 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1808 return -EINVAL;
1809
1810		/* if the mask is all zeros, then sniff both MC and UC */
1811 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1812 type[0] = MLX4_FS_MC_SNIFFER;
1813 type[1] = MLX4_FS_UC_SNIFFER;
1814 } else {
1815 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1816 ib_spec->eth.mask.dst_mac[1],
1817 ib_spec->eth.mask.dst_mac[2],
1818 ib_spec->eth.mask.dst_mac[3],
1819 ib_spec->eth.mask.dst_mac[4],
1820 ib_spec->eth.mask.dst_mac[5]};
1821
1822			/* The xor above was only on the MC bit; a non-empty mask
1823			 * is valid only if that bit is set and the rest are zero.
1824			 */
1825 if (!is_zero_ether_addr(&mac[0]))
1826 return -EINVAL;
1827
1828 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1829 type[0] = MLX4_FS_MC_SNIFFER;
1830 else
1831 type[0] = MLX4_FS_UC_SNIFFER;
1832 }
1833 }
1834
1835 return err;
1836}
1837
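/*
 * ib_create_flow handler: choose the promisc mode(s) for the attribute
 * type, create the rule(s), and when the ports are bonded install a mirror
 * copy on port 2. For normal rules a VXLAN tunnel steering entry may be
 * added as well. All firmware handles are kept in mflow for destroy_flow.
 */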
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001838static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1839 struct ib_flow_attr *flow_attr,
1840 int domain)
1841{
Moni Shoua146d6e12015-02-03 16:48:38 +02001842 int err = 0, i = 0, j = 0;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001843 struct mlx4_ib_flow *mflow;
1844 enum mlx4_net_trans_promisc_mode type[2];
Moni Shoua146d6e12015-02-03 16:48:38 +02001845 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1846 int is_bonded = mlx4_is_bonded(dev);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001847
Yishai Hadas5533c182016-06-22 17:27:30 +03001848 if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1849 return ERR_PTR(-EINVAL);
1850
Marina Varshaver0e451e82016-02-18 18:31:06 +02001851 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1852 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
Marina Varshavera3100a72016-02-18 18:31:05 +02001853 return ERR_PTR(-EOPNOTSUPP);
1854
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001855 memset(type, 0, sizeof(type));
1856
1857 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1858 if (!mflow) {
1859 err = -ENOMEM;
1860 goto err_free;
1861 }
1862
1863 switch (flow_attr->type) {
1864 case IB_FLOW_ATTR_NORMAL:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001865		/* If the don't-trap flag (continue match) is set, then under
1866		 * specific conditions traffic is replicated to the given qp
1867		 * without stealing it
1868		 */
1869 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1870 err = mlx4_ib_add_dont_trap_rule(dev,
1871 flow_attr,
1872 type);
1873 if (err)
1874 goto err_free;
1875 } else {
1876 type[0] = MLX4_FS_REGULAR;
1877 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001878 break;
1879
1880 case IB_FLOW_ATTR_ALL_DEFAULT:
1881 type[0] = MLX4_FS_ALL_DEFAULT;
1882 break;
1883
1884 case IB_FLOW_ATTR_MC_DEFAULT:
1885 type[0] = MLX4_FS_MC_DEFAULT;
1886 break;
1887
1888 case IB_FLOW_ATTR_SNIFFER:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001889 type[0] = MLX4_FS_MIRROR_RX_PORT;
1890 type[1] = MLX4_FS_MIRROR_SX_PORT;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001891 break;
1892
1893 default:
1894 err = -EINVAL;
1895 goto err_free;
1896 }
1897
1898 while (i < ARRAY_SIZE(type) && type[i]) {
1899 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
Moni Shoua146d6e12015-02-03 16:48:38 +02001900 &mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001901 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001902 goto err_create_flow;
Moni Shoua146d6e12015-02-03 16:48:38 +02001903 if (is_bonded) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001904 /* Application always sees one port so the mirror rule
1905 * must be on port #2
1906 */
Moni Shoua146d6e12015-02-03 16:48:38 +02001907 flow_attr->port = 2;
1908 err = __mlx4_ib_create_flow(qp, flow_attr,
1909 domain, type[j],
1910 &mflow->reg_id[j].mirror);
1911 flow_attr->port = 1;
1912 if (err)
1913 goto err_create_flow;
1914 j++;
1915 }
1916
Roland Dreier11562562015-05-29 23:11:27 -07001917 i++;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001918 }
1919
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001920 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001921 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1922 &mflow->reg_id[i].id);
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001923 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001924 goto err_create_flow;
Roland Dreier11562562015-05-29 23:11:27 -07001925
Moni Shoua146d6e12015-02-03 16:48:38 +02001926 if (is_bonded) {
1927 flow_attr->port = 2;
1928 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1929 &mflow->reg_id[j].mirror);
1930 flow_attr->port = 1;
1931 if (err)
1932 goto err_create_flow;
1933 j++;
1934 }
1935 /* function to create mirror rule */
Roland Dreier11562562015-05-29 23:11:27 -07001936 i++;
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001937 }
1938
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001939 return &mflow->ibflow;
1940
Or Gerlitz571e1b22014-10-30 15:59:28 +02001941err_create_flow:
1942 while (i) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001943 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1944 mflow->reg_id[i].id);
Or Gerlitz571e1b22014-10-30 15:59:28 +02001945 i--;
1946 }
Moni Shoua146d6e12015-02-03 16:48:38 +02001947
1948 while (j) {
1949 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1950 mflow->reg_id[j].mirror);
1951 j--;
1952 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001953err_free:
1954 kfree(mflow);
1955 return ERR_PTR(err);
1956}
1957
1958static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1959{
1960 int err, ret = 0;
1961 int i = 0;
1962 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1963 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1964
Moni Shoua146d6e12015-02-03 16:48:38 +02001965 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1966 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001967 if (err)
1968 ret = err;
Moni Shoua146d6e12015-02-03 16:48:38 +02001969 if (mflow->reg_id[i].mirror) {
1970 err = __mlx4_ib_destroy_flow(mdev->dev,
1971 mflow->reg_id[i].mirror);
1972 if (err)
1973 ret = err;
1974 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001975 i++;
1976 }
1977
1978 kfree(mflow);
1979 return ret;
1980}
1981
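/*
 * Multicast attach: register the group with the device (mirrored on the
 * second port when bonded), remember the GID on the QP, and in
 * device-managed steering mode save the returned reg_id so that detach
 * can find the rule again.
 */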
Roland Dreier225c7b12007-05-08 18:00:38 -07001982static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1983{
Eli Cohenfa417f72010-10-24 21:08:52 -07001984 int err;
1985 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001986 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001987 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001988 struct mlx4_ib_steering *ib_steering = NULL;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001989 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Moni Shoua146d6e12015-02-03 16:48:38 +02001990 struct mlx4_flow_reg_id reg_id;
Eli Cohenfa417f72010-10-24 21:08:52 -07001991
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001992 if (mdev->dev->caps.steering_mode ==
1993 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1994 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1995 if (!ib_steering)
1996 return -ENOMEM;
1997 }
1998
1999 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
2000 !!(mqp->flags &
2001 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
Moni Shoua146d6e12015-02-03 16:48:38 +02002002 prot, &reg_id.id);
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002003 if (err) {
2004 pr_err("multicast attach op failed, err %d\n", err);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002005 goto err_malloc;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002006 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002007
Moni Shoua146d6e12015-02-03 16:48:38 +02002008 reg_id.mirror = 0;
2009 if (mlx4_is_bonded(dev)) {
Moni Shoua824c25c2015-02-08 11:49:33 +02002010 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
2011 (mqp->port == 1) ? 2 : 1,
Moni Shoua146d6e12015-02-03 16:48:38 +02002012 !!(mqp->flags &
2013 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
2014 prot, &reg_id.mirror);
2015 if (err)
2016 goto err_add;
2017 }
2018
Eli Cohenfa417f72010-10-24 21:08:52 -07002019 err = add_gid_entry(ibqp, gid);
2020 if (err)
2021 goto err_add;
2022
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002023 if (ib_steering) {
2024 memcpy(ib_steering->gid.raw, gid->raw, 16);
2025 ib_steering->reg_id = reg_id;
2026 mutex_lock(&mqp->mutex);
2027 list_add(&ib_steering->list, &mqp->steering_rules);
2028 mutex_unlock(&mqp->mutex);
2029 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002030 return 0;
2031
2032err_add:
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002033 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02002034 prot, reg_id.id);
2035 if (reg_id.mirror)
2036 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
2037 prot, reg_id.mirror);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002038err_malloc:
2039 kfree(ib_steering);
2040
Eli Cohenfa417f72010-10-24 21:08:52 -07002041 return err;
2042}
2043
2044static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
2045{
2046 struct mlx4_ib_gid_entry *ge;
2047 struct mlx4_ib_gid_entry *tmp;
2048 struct mlx4_ib_gid_entry *ret = NULL;
2049
2050 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
2051 if (!memcmp(raw, ge->gid.raw, 16)) {
2052 ret = ge;
2053 break;
2054 }
2055 }
2056
2057 return ret;
Roland Dreier225c7b12007-05-08 18:00:38 -07002058}
2059
2060static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2061{
Eli Cohenfa417f72010-10-24 21:08:52 -07002062 int err;
2063 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02002064 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002065 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Eli Cohenfa417f72010-10-24 21:08:52 -07002066 struct net_device *ndev;
2067 struct mlx4_ib_gid_entry *ge;
Moni Shoua146d6e12015-02-03 16:48:38 +02002068 struct mlx4_flow_reg_id reg_id = {0, 0};
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002069 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Eli Cohenfa417f72010-10-24 21:08:52 -07002070
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002071 if (mdev->dev->caps.steering_mode ==
2072 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2073 struct mlx4_ib_steering *ib_steering;
2074
2075 mutex_lock(&mqp->mutex);
2076 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
2077 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
2078 list_del(&ib_steering->list);
2079 break;
2080 }
2081 }
2082 mutex_unlock(&mqp->mutex);
2083 if (&ib_steering->list == &mqp->steering_rules) {
2084 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
2085 return -EINVAL;
2086 }
2087 reg_id = ib_steering->reg_id;
2088 kfree(ib_steering);
2089 }
2090
2091 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02002092 prot, reg_id.id);
Eli Cohenfa417f72010-10-24 21:08:52 -07002093 if (err)
2094 return err;
2095
Moni Shoua146d6e12015-02-03 16:48:38 +02002096 if (mlx4_is_bonded(dev)) {
2097 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
2098 prot, reg_id.mirror);
2099 if (err)
2100 return err;
2101 }
2102
Eli Cohenfa417f72010-10-24 21:08:52 -07002103 mutex_lock(&mqp->mutex);
2104 ge = find_gid_entry(mqp, gid->raw);
2105 if (ge) {
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002106 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07002107 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
2108 if (ndev)
2109 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002110 spin_unlock_bh(&mdev->iboe.lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002111 if (ndev)
Eli Cohenfa417f72010-10-24 21:08:52 -07002112 dev_put(ndev);
Eli Cohenfa417f72010-10-24 21:08:52 -07002113 list_del(&ge->list);
2114 kfree(ge);
2115 } else
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002116 pr_warn("could not find mgid entry\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07002117
2118 mutex_unlock(&mqp->mutex);
2119
2120 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002121}
2122
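/*
 * Query the node description and node info through the MAD interface to
 * populate ib_dev.node_desc, the device rev_id and the node GUID.
 */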
2123static int init_node_data(struct mlx4_ib_dev *dev)
2124{
2125 struct ib_smp *in_mad = NULL;
2126 struct ib_smp *out_mad = NULL;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002127 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
Roland Dreier225c7b12007-05-08 18:00:38 -07002128 int err = -ENOMEM;
2129
2130 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
2131 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
2132 if (!in_mad || !out_mad)
2133 goto out;
2134
2135 init_query_mad(in_mad);
2136 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002137 if (mlx4_is_master(dev->dev))
2138 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
Roland Dreier225c7b12007-05-08 18:00:38 -07002139
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002140 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002141 if (err)
2142 goto out;
2143
Yuval Shaiabd99fde2016-08-25 10:57:07 -07002144 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
Roland Dreier225c7b12007-05-08 18:00:38 -07002145
2146 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2147
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002148 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002149 if (err)
2150 goto out;
2151
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002152 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
Roland Dreier225c7b12007-05-08 18:00:38 -07002153 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2154
2155out:
2156 kfree(in_mad);
2157 kfree(out_mad);
2158 return err;
2159}
2160
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002161static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2162 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002163{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002164 struct mlx4_ib_dev *dev =
2165 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002166 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002167}
2168
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002169static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2170 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002171{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002172 struct mlx4_ib_dev *dev =
2173 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002174 return sprintf(buf, "%x\n", dev->dev->rev_id);
2175}
2176
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002177static ssize_t show_board(struct device *device, struct device_attribute *attr,
2178 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002179{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002180 struct mlx4_ib_dev *dev =
2181 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2182 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2183 dev->dev->board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002184}
2185
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002186static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002187static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2188static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002189
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002190static struct device_attribute *mlx4_class_attributes[] = {
2191 &dev_attr_hw_rev,
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002192 &dev_attr_hca_type,
2193 &dev_attr_board_id
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002194};
2195
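/*
 * Diagnostic counters exposed through the rdma_hw_stats interface; each
 * entry names a counter and gives its offset in the firmware's diagnostic
 * counter query output.
 */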
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002196struct diag_counter {
2197 const char *name;
2198 u32 offset;
2199};
2200
2201#define DIAG_COUNTER(_name, _offset) \
2202 { .name = #_name, .offset = _offset }
2203
2204static const struct diag_counter diag_basic[] = {
2205 DIAG_COUNTER(rq_num_lle, 0x00),
2206 DIAG_COUNTER(sq_num_lle, 0x04),
2207 DIAG_COUNTER(rq_num_lqpoe, 0x08),
2208 DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2209 DIAG_COUNTER(rq_num_lpe, 0x18),
2210 DIAG_COUNTER(sq_num_lpe, 0x1C),
2211 DIAG_COUNTER(rq_num_wrfe, 0x20),
2212 DIAG_COUNTER(sq_num_wrfe, 0x24),
2213 DIAG_COUNTER(sq_num_mwbe, 0x2C),
2214 DIAG_COUNTER(sq_num_bre, 0x34),
2215 DIAG_COUNTER(sq_num_rire, 0x44),
2216 DIAG_COUNTER(rq_num_rire, 0x48),
2217 DIAG_COUNTER(sq_num_rae, 0x4C),
2218 DIAG_COUNTER(rq_num_rae, 0x50),
2219 DIAG_COUNTER(sq_num_roe, 0x54),
2220 DIAG_COUNTER(sq_num_tree, 0x5C),
2221 DIAG_COUNTER(sq_num_rree, 0x64),
2222 DIAG_COUNTER(rq_num_rnr, 0x68),
2223 DIAG_COUNTER(sq_num_rnr, 0x6C),
2224 DIAG_COUNTER(rq_num_oos, 0x100),
2225 DIAG_COUNTER(sq_num_oos, 0x104),
2226};
2227
2228static const struct diag_counter diag_ext[] = {
2229 DIAG_COUNTER(rq_num_dup, 0x130),
2230 DIAG_COUNTER(sq_num_to, 0x134),
2231};
2232
2233static const struct diag_counter diag_device_only[] = {
2234 DIAG_COUNTER(num_cqovf, 0x1A0),
2235 DIAG_COUNTER(rq_num_udsdprd, 0x118),
2236};
2237
2238static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2239 u8 port_num)
2240{
2241 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2242 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2243
2244 if (!diag[!!port_num].name)
2245 return NULL;
2246
2247 return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2248 diag[!!port_num].num_counters,
2249 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2250}
2251
2252static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2253 struct rdma_hw_stats *stats,
2254 u8 port, int index)
2255{
2256 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2257 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2258 u32 hw_value[ARRAY_SIZE(diag_device_only) +
2259 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2260 int ret;
2261 int i;
2262
2263 ret = mlx4_query_diag_counters(dev->dev,
2264 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2265 diag[!!port].offset, hw_value,
2266 diag[!!port].num_counters, port);
2267
2268 if (ret)
2269 return ret;
2270
2271 for (i = 0; i < diag[!!port].num_counters; i++)
2272 stats->value[i] = hw_value[i];
2273
2274 return diag[!!port].num_counters;
2275}
2276
2277static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2278 const char ***name,
2279 u32 **offset,
2280 u32 *num,
2281 bool port)
2282{
2283 u32 num_counters;
2284
2285 num_counters = ARRAY_SIZE(diag_basic);
2286
2287 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2288 num_counters += ARRAY_SIZE(diag_ext);
2289
2290 if (!port)
2291 num_counters += ARRAY_SIZE(diag_device_only);
2292
2293 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2294 if (!*name)
2295 return -ENOMEM;
2296
2297 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2298 if (!*offset)
2299 goto err_name;
2300
2301 *num = num_counters;
2302
2303 return 0;
2304
2305err_name:
2306 kfree(*name);
2307 return -ENOMEM;
2308}
2309
2310static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2311 const char **name,
2312 u32 *offset,
2313 bool port)
2314{
2315 int i;
2316 int j;
2317
2318 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2319 name[i] = diag_basic[i].name;
2320 offset[i] = diag_basic[i].offset;
2321 }
2322
2323 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2324 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2325 name[j] = diag_ext[i].name;
2326 offset[j] = diag_ext[i].offset;
2327 }
2328 }
2329
2330 if (!port) {
2331 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2332 name[j] = diag_device_only[i].name;
2333 offset[j] = diag_device_only[i].offset;
2334 }
2335 }
2336}
2337
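/*
 * Build the device-wide and, when supported, per-port diagnostic counter
 * tables and hook up the alloc_hw_stats/get_hw_stats callbacks. Slave
 * functions skip this entirely.
 */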
2338static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2339{
2340 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2341 int i;
2342 int ret;
2343 bool per_port = !!(ibdev->dev->caps.flags2 &
2344 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2345
Kamal Heib69d269d382016-09-12 19:16:22 +03002346 if (mlx4_is_slave(ibdev->dev))
2347 return 0;
2348
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002349 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2350 /* i == 1 means we are building port counters */
2351 if (i && !per_port)
2352 continue;
2353
2354 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2355 &diag[i].offset,
2356 &diag[i].num_counters, i);
2357 if (ret)
2358 goto err_alloc;
2359
2360 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2361 diag[i].offset, i);
2362 }
2363
2364 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
2365 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
2366
2367 return 0;
2368
2369err_alloc:
2370 if (i) {
2371 kfree(diag[i - 1].name);
2372 kfree(diag[i - 1].offset);
2373 }
2374
2375 return ret;
2376}
2377
2378static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2379{
2380 int i;
2381
2382 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2383 kfree(ibdev->diag_counters[i].offset);
2384 kfree(ibdev->diag_counters[i].name);
2385 }
2386}
2387
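/*
 * mlx4_ib_update_qps() reacts to a MAC change on the port's netdev:
 * it publishes the new source MAC and, under SRIOV, moves the proxy QP1
 * to a freshly registered MAC index, unregistering the MAC that is no
 * longer needed.
 */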
Matan Barak9433c182014-05-15 15:29:28 +03002388#define MLX4_IB_INVALID_MAC ((u64)-1)
2389static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2390 struct net_device *dev,
2391 int port)
2392{
2393 u64 new_smac = 0;
2394 u64 release_mac = MLX4_IB_INVALID_MAC;
2395 struct mlx4_ib_qp *qp;
2396
2397 read_lock(&dev_base_lock);
2398 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2399 read_unlock(&dev_base_lock);
2400
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002401 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2402
Jack Morgensteind24d9f42014-09-11 14:11:18 +03002403	/* no need to update QP1 or register the mac in non-SRIOV */
2404 if (!mlx4_is_mfunc(ibdev->dev))
2405 return;
2406
Matan Barak9433c182014-05-15 15:29:28 +03002407 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2408 qp = ibdev->qp1_proxy[port - 1];
2409 if (qp) {
2410 int new_smac_index;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002411 u64 old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002412 struct mlx4_update_qp_params update_params;
2413
Jack Morgenstein25476b02014-09-11 14:11:20 +03002414 mutex_lock(&qp->mutex);
2415 old_smac = qp->pri.smac;
Matan Barak9433c182014-05-15 15:29:28 +03002416 if (new_smac == old_smac)
2417 goto unlock;
2418
2419 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2420
2421 if (new_smac_index < 0)
2422 goto unlock;
2423
2424 update_params.smac_index = new_smac_index;
Matan Barak09e05c32014-09-10 16:41:56 +03002425 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
Matan Barak9433c182014-05-15 15:29:28 +03002426 &update_params)) {
2427 release_mac = new_smac;
2428 goto unlock;
2429 }
Jack Morgenstein25476b02014-09-11 14:11:20 +03002430 /* if old port was zero, no mac was yet registered for this QP */
2431 if (qp->pri.smac_port)
2432 release_mac = old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002433 qp->pri.smac = new_smac;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002434 qp->pri.smac_port = port;
Matan Barak9433c182014-05-15 15:29:28 +03002435 qp->pri.smac_index = new_smac_index;
Matan Barak9433c182014-05-15 15:29:28 +03002436 }
2437
2438unlock:
Matan Barak9433c182014-05-15 15:29:28 +03002439 if (release_mac != MLX4_IB_INVALID_MAC)
2440 mlx4_unregister_mac(ibdev->dev, port, release_mac);
Jack Morgenstein25476b02014-09-11 14:11:20 +03002441 if (qp)
2442 mutex_unlock(&qp->mutex);
2443 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
Matan Barak9433c182014-05-15 15:29:28 +03002444}
2445
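/*
 * Refresh the cached netdev for every IBoE port; if the event is an
 * address, registration or link change on one of them, trigger the QP1
 * SMAC update above for that port.
 */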
Matan Barak9433c182014-05-15 15:29:28 +03002446static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2447 struct net_device *dev,
2448 unsigned long event)
2449
Moni Shouad487ee72013-12-12 18:03:13 +02002450{
2451 struct mlx4_ib_iboe *iboe;
Matan Barak9433c182014-05-15 15:29:28 +03002452 int update_qps_port = -1;
Moni Shouad487ee72013-12-12 18:03:13 +02002453 int port;
2454
Moni Shoua5070cd22015-07-30 18:33:30 +03002455 ASSERT_RTNL();
2456
Moni Shouad487ee72013-12-12 18:03:13 +02002457 iboe = &ibdev->iboe;
2458
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002459 spin_lock_bh(&iboe->lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002460 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
Moni Shouaad4885d22014-02-05 15:13:02 +02002461
Moni Shouad487ee72013-12-12 18:03:13 +02002462 iboe->netdevs[port - 1] =
2463 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
Moni Shouad487ee72013-12-12 18:03:13 +02002464
Matan Barak9433c182014-05-15 15:29:28 +03002465 if (dev == iboe->netdevs[port - 1] &&
2466 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2467 event == NETDEV_UP || event == NETDEV_CHANGE))
2468 update_qps_port = port;
2469
Moni Shouad487ee72013-12-12 18:03:13 +02002470 }
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002471 spin_unlock_bh(&iboe->lock);
Matan Barak9433c182014-05-15 15:29:28 +03002472
2473 if (update_qps_port > 0)
2474 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
Moni Shouad487ee72013-12-12 18:03:13 +02002475}
2476
2477static int mlx4_ib_netdev_event(struct notifier_block *this,
2478 unsigned long event, void *ptr)
2479{
Jiri Pirko351638e2013-05-28 01:30:21 +00002480 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Eli Cohenfa417f72010-10-24 21:08:52 -07002481 struct mlx4_ib_dev *ibdev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002482
2483 if (!net_eq(dev_net(dev), &init_net))
2484 return NOTIFY_DONE;
2485
2486 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
Matan Barak9433c182014-05-15 15:29:28 +03002487 mlx4_ib_scan_netdevs(ibdev, dev, event);
Eli Cohenfa417f72010-10-24 21:08:52 -07002488
2489 return NOTIFY_DONE;
2490}
2491
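/*
 * On the master, initialize each slave's virt-to-phys pkey mapping (the
 * identity mapping for the master and for index 0, the last table entry
 * otherwise) and seed the physical pkey cache.
 */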
Jack Morgenstein54679e12012-08-03 08:40:43 +00002492static void init_pkeys(struct mlx4_ib_dev *ibdev)
2493{
2494 int port;
2495 int slave;
2496 int i;
2497
2498 if (mlx4_is_master(ibdev->dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002499 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2500 ++slave) {
Jack Morgenstein54679e12012-08-03 08:40:43 +00002501 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2502 for (i = 0;
2503 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2504 ++i) {
2505 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2506 /* master has the identity virt2phys pkey mapping */
2507 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2508 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2509 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2510 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2511 }
2512 }
2513 }
2514 /* initialize pkey cache */
2515 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2516 for (i = 0;
2517 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2518 ++i)
2519 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2520 (i) ? 0 : 0xFFFF;
2521 }
2522 }
2523}
2524
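/*
 * Carve out completion EQs for this IB device: walk each port's EQs,
 * skip vectors shared across ports for ports beyond the first, assign
 * what is available and advertise the resulting count through
 * num_comp_vectors.
 */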
Shlomo Pongratze605b742012-04-29 17:04:27 +03002525static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2526{
Matan Barakc66fa192015-05-31 09:30:16 +03002527 int i, j, eq = 0, total_eqs = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002528
Matan Barakc66fa192015-05-31 09:30:16 +03002529 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2530 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002531 if (!ibdev->eq_table)
2532 return;
2533
Matan Barakc66fa192015-05-31 09:30:16 +03002534 for (i = 1; i <= dev->caps.num_ports; i++) {
2535 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2536 j++, total_eqs++) {
2537 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2538 continue;
2539 ibdev->eq_table[eq] = total_eqs;
2540 if (!mlx4_assign_eq(dev, i,
2541 &ibdev->eq_table[eq]))
2542 eq++;
2543 else
2544 ibdev->eq_table[eq] = -1;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002545 }
2546 }
2547
Matan Barakc66fa192015-05-31 09:30:16 +03002548 for (i = eq; i < dev->caps.num_comp_vectors;
2549 ibdev->eq_table[i++] = -1)
2550 ;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002551
2552 /* Advertise the new number of EQs to clients */
Matan Barakc66fa192015-05-31 09:30:16 +03002553 ibdev->ib_dev.num_comp_vectors = eq;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002554}
2555
2556static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2557{
2558 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002559 int total_eqs = ibdev->ib_dev.num_comp_vectors;
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002560
Matan Barakc66fa192015-05-31 09:30:16 +03002561 /* no eqs were allocated */
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002562 if (!ibdev->eq_table)
2563 return;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002564
2565 /* Reset the advertised EQ number */
Matan Barakc66fa192015-05-31 09:30:16 +03002566 ibdev->ib_dev.num_comp_vectors = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002567
Matan Barakc66fa192015-05-31 09:30:16 +03002568 for (i = 0; i < total_eqs; i++)
Shlomo Pongratze605b742012-04-29 17:04:27 +03002569 mlx4_release_eq(dev, ibdev->eq_table[i]);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002570
Shlomo Pongratze605b742012-04-29 17:04:27 +03002571 kfree(ibdev->eq_table);
Matan Barakc66fa192015-05-31 09:30:16 +03002572 ibdev->eq_table = NULL;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002573}
2574
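/*
 * Report immutable port attributes: IB core capabilities for IB links,
 * RoCE v1/v2 plus raw packet capabilities for Ethernet links, and the
 * pkey/GID table sizes obtained from ib_query_port().
 */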
Ira Weiny77386132015-05-13 20:02:58 -04002575static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2576 struct ib_port_immutable *immutable)
2577{
2578 struct ib_port_attr attr;
Matan Barak4ed088e2016-01-14 17:50:43 +02002579 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
Ira Weiny77386132015-05-13 20:02:58 -04002580 int err;
2581
Matan Barak4ed088e2016-01-14 17:50:43 +02002582 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
Ira Weinyf9b22e32015-05-13 20:02:59 -04002583 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002584 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002585 } else {
2586 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2587 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2588 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2589 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2590 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002591 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2592 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2593 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2594 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002595 }
Ira Weinyf9b22e32015-05-13 20:02:59 -04002596
Or Gerlitzc4550c62017-01-24 13:02:39 +02002597 err = ib_query_port(ibdev, port_num, &attr);
2598 if (err)
2599 return err;
2600
2601 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2602 immutable->gid_tbl_len = attr.gid_tbl_len;
2603
Ira Weiny77386132015-05-13 20:02:58 -04002604 return 0;
2605}
2606
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002607static void get_fw_ver_str(struct ib_device *device, char *str)
Ira Weinye9db59f2016-06-15 02:22:00 -04002608{
2609 struct mlx4_ib_dev *dev =
2610 container_of(device, struct mlx4_ib_dev, ib_dev);
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002611 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
Ira Weinye9db59f2016-06-15 02:22:00 -04002612 (int) (dev->dev->caps.fw_ver >> 32),
2613 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2614 (int) dev->dev->caps.fw_ver & 0xffff);
2615}
2616
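/*
 * Device-add callback registered with the mlx4 core driver: allocate the
 * ib_device, map the UAR, fill in the verbs entry points according to the
 * device capabilities, and set up EQs, counters and IBoE state.
 */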
Roland Dreier225c7b12007-05-08 18:00:38 -07002617static void *mlx4_ib_add(struct mlx4_dev *dev)
2618{
2619 struct mlx4_ib_dev *ibdev;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002620 int num_ports = 0;
Jack Morgenstein035b1032012-05-10 23:28:09 +03002621 int i, j;
Eli Cohenfa417f72010-10-24 21:08:52 -07002622 int err;
2623 struct mlx4_ib_iboe *iboe;
Matan Barak41966702014-02-02 17:06:47 +02002624 int ib_num_ports = 0;
Moni Shouaa5750092015-02-03 16:48:37 +02002625 int num_req_counters;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002626 int allocated;
2627 u32 counter_index;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002628 struct counter_index *new_counter_index = NULL;
Roland Dreier225c7b12007-05-08 18:00:38 -07002629
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002630 pr_info_once("%s", mlx4_ib_version);
Roland Dreier68f39482008-02-04 20:20:44 -08002631
Jack Morgenstein026149c2012-08-03 08:40:55 +00002632 num_ports = 0;
Eli Cohenfa417f72010-10-24 21:08:52 -07002633 mlx4_foreach_ib_transport_port(i, dev)
Roland Dreier22e7ef92009-01-09 13:22:29 -08002634 num_ports++;
2635
2636 /* No point in registering a device with no ports... */
2637 if (num_ports == 0)
2638 return NULL;
2639
Roland Dreier225c7b12007-05-08 18:00:38 -07002640 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2641 if (!ibdev) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002642 dev_err(&dev->persist->pdev->dev,
2643 "Device struct alloc failed\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002644 return NULL;
2645 }
2646
Eli Cohenfa417f72010-10-24 21:08:52 -07002647 iboe = &ibdev->iboe;
2648
Roland Dreier225c7b12007-05-08 18:00:38 -07002649 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2650 goto err_dealloc;
2651
2652 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2653 goto err_pd;
2654
Roland Dreier4979d182011-01-12 09:50:36 -08002655 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2656 PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002657 if (!ibdev->uar_map)
2658 goto err_uar;
Jack Morgenstein26c6bc72007-05-13 17:18:23 +03002659 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002660
Roland Dreier225c7b12007-05-08 18:00:38 -07002661 ibdev->dev = dev;
Moni Shouac6215742015-02-03 16:48:39 +02002662 ibdev->bond_next_port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002663
2664 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2665 ibdev->ib_dev.owner = THIS_MODULE;
2666 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
Roland Dreier95d04f02008-07-23 08:12:26 -07002667 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002668 ibdev->num_ports = num_ports;
Moni Shouaa5750092015-02-03 16:48:37 +02002669 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2670 1 : ibdev->num_ports;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002671 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
Bart Van Assched66c88a82017-01-20 13:04:20 -08002672 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
Moni Shoua5070cd22015-07-30 18:33:30 +03002673 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2674 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2675 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
Roland Dreier225c7b12007-05-08 18:00:38 -07002676
Or Gerlitz08ff3232012-10-21 14:59:24 +00002677 if (dev->caps.userspace_caps)
2678 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2679 else
2680 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2681
Roland Dreier225c7b12007-05-08 18:00:38 -07002682 ibdev->ib_dev.uverbs_cmd_mask =
2683 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2684 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2685 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2686 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2687 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2688 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Matan Barak93769322014-07-31 11:01:30 +03002689 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002690 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2691 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2692 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002693 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002694 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2695 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2696 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002697 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002698 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2699 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2700 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2701 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2702 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002703 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
Sean Hefty18abd5e2011-06-02 10:43:26 -07002704 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
Sean Hefty42849b22011-08-11 13:57:43 -07002705 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2706 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Roland Dreier225c7b12007-05-08 18:00:38 -07002707
2708 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2709 ibdev->ib_dev.query_port = mlx4_ib_query_port;
Eli Cohenfa417f72010-10-24 21:08:52 -07002710 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
Roland Dreier225c7b12007-05-08 18:00:38 -07002711 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2712 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2713 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2714 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2715 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2716 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2717 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2718 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2719 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2720 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2721 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2722 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2723 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2724 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002725 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002726 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2727 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2728 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2729 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002730 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
Roland Dreier225c7b12007-05-08 18:00:38 -07002731 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2732 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2733 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2734 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
Eli Cohen3fdcb972008-04-16 21:09:33 -07002735 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002736 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002737 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2738 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2739 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2740 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2741 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
Matan Barak93769322014-07-31 11:01:30 +03002742 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
Roland Dreier225c7b12007-05-08 18:00:38 -07002743 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
Sagi Grimberg679e34d2015-07-30 10:32:42 +03002744 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
Sagi Grimberg1b2cd0f2015-10-13 19:11:27 +03002745 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
Roland Dreier225c7b12007-05-08 18:00:38 -07002746 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2747 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2748 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
Ira Weiny77386132015-05-13 20:02:58 -04002749 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
Ira Weinye9db59f2016-06-15 02:22:00 -04002750 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
Yishai Hadasae184dd2015-08-13 18:32:06 +03002751 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
Roland Dreier225c7b12007-05-08 18:00:38 -07002752
Guy Levi400b1eb2017-07-04 16:24:24 +03002753 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2754 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2755 IB_LINK_LAYER_ETHERNET) ||
2756 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2757 IB_LINK_LAYER_ETHERNET))) {
2758 ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
2759 ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
2760 ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
Guy Levib8d46ca2017-07-04 16:24:25 +03002761 ibdev->ib_dev.create_rwq_ind_table =
2762 mlx4_ib_create_rwq_ind_table;
2763 ibdev->ib_dev.destroy_rwq_ind_table =
2764 mlx4_ib_destroy_rwq_ind_table;
Guy Levi400b1eb2017-07-04 16:24:24 +03002765 ibdev->ib_dev.uverbs_ex_cmd_mask |=
Guy Levib8d46ca2017-07-04 16:24:25 +03002766 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
2767 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
2768 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
2769 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2770 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
Guy Levi400b1eb2017-07-04 16:24:24 +03002771 }
2772
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002773 if (!mlx4_is_slave(ibdev->dev)) {
2774 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2775 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2776 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2777 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2778 }
Jack Morgenstein8ad11fb2007-08-01 12:29:05 +03002779
Shani Michaelib4253882013-02-06 16:19:16 +00002780 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2781 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2782 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
Shani Michaelib4253882013-02-06 16:19:16 +00002783 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2784
2785 ibdev->ib_dev.uverbs_cmd_mask |=
2786 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2787 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2788 }
2789
Sean Hefty012a8ff2011-06-02 09:01:33 -07002790 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2791 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2792 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2793 ibdev->ib_dev.uverbs_cmd_mask |=
2794 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2795 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2796 }
2797
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002798 if (check_flow_steering_support(dev)) {
Matan Barak0a9b7d52013-11-07 15:25:15 +02002799 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002800 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2801 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2802
Yann Droneaudf21519b2013-11-06 23:21:49 +01002803 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2804 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2805 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002806 }
2807
Matan Barak4b664c42015-06-11 16:35:27 +03002808 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2809 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
Eran Ben Elishafbfb6622015-10-15 14:44:42 +03002810 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2811 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
Matan Barak4b664c42015-06-11 16:35:27 +03002812
Shlomo Pongratze605b742012-04-29 17:04:27 +03002813 mlx4_ib_alloc_eqs(dev, ibdev);
2814
Eli Cohenfa417f72010-10-24 21:08:52 -07002815 spin_lock_init(&iboe->lock);
2816
Roland Dreier225c7b12007-05-08 18:00:38 -07002817 if (init_node_data(ibdev))
2818 goto err_map;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03002819 mlx4_init_sl2vl_tbl(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002820
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002821 for (i = 0; i < ibdev->num_ports; ++i) {
2822 mutex_init(&ibdev->counters_table[i].mutex);
2823 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2824 }
2825
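/* Set up one counter per port (or a single shared counter when the ports
 * are bonded): Ethernet ports try to allocate a dedicated counter and fall
 * back to the port's default index on failure, while IB ports always use
 * the default counter index.
 */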
Moni Shouaa5750092015-02-03 16:48:37 +02002826 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2827 for (i = 0; i < num_req_counters; ++i) {
Matan Barak9433c182014-05-15 15:29:28 +03002828 mutex_init(&ibdev->qp1_proxy_lock[i]);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002829 allocated = 0;
Or Gerlitzcfcde112011-06-15 14:49:57 +00002830 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2831 IB_LINK_LAYER_ETHERNET) {
Moshe Shemeshf3301872017-06-21 09:29:36 +03002832 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2833 MLX4_RES_USAGE_DRIVER);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002834 /* if allocating a new counter failed, use the default */
Or Gerlitzcfcde112011-06-15 14:49:57 +00002835 if (err)
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002836 counter_index =
2837 mlx4_get_default_counter_index(dev,
2838 i + 1);
2839 else
2840 allocated = 1;
2841 } else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2842 counter_index = mlx4_get_default_counter_index(dev,
2843 i + 1);
Dan Carpenter3839d8a2014-03-28 11:21:39 +03002844 }
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002845 new_counter_index = kmalloc(sizeof(*new_counter_index),
2846 GFP_KERNEL);
2847 if (!new_counter_index) {
2848 if (allocated)
2849 mlx4_counter_free(ibdev->dev, counter_index);
2850 goto err_counter;
2851 }
2852 new_counter_index->index = counter_index;
2853 new_counter_index->allocated = allocated;
2854 list_add_tail(&new_counter_index->list,
2855 &ibdev->counters_table[i].counters_list);
2856 ibdev->counters_table[i].default_counter = counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002857 pr_info("counter index %d for port %d allocated %d\n",
2858 counter_index, i + 1, allocated);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002859 }
Moni Shouaa5750092015-02-03 16:48:37 +02002860 if (mlx4_is_bonded(dev))
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002861 for (i = 1; i < ibdev->num_ports; ++i) {
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002862 new_counter_index =
2863 kmalloc(sizeof(struct counter_index),
2864 GFP_KERNEL);
2865 if (!new_counter_index)
2866 goto err_counter;
2867 new_counter_index->index = counter_index;
2868 new_counter_index->allocated = 0;
2869 list_add_tail(&new_counter_index->list,
2870 &ibdev->counters_table[i].counters_list);
2871 ibdev->counters_table[i].default_counter =
2872 counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002873 }
Or Gerlitzcfcde112011-06-15 14:49:57 +00002874
Matan Barak41966702014-02-02 17:06:47 +02002875 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2876 ib_num_ports++;
2877
Roland Dreier225c7b12007-05-08 18:00:38 -07002878 spin_lock_init(&ibdev->sm_lock);
2879 mutex_init(&ibdev->cap_mask_mutex);
Yishai Hadas35f05da2015-02-08 11:49:34 +02002880 INIT_LIST_HEAD(&ibdev->qp_list);
2881 spin_lock_init(&ibdev->reset_flow_resource_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002882
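/* With device-managed flow steering and at least one IB port, reserve a
 * QPN range for UC steering and track it with a bitmap. When the HCA
 * supports DMFS for IPoIB the range is registered with the firmware and
 * the bitmap starts empty; otherwise the bitmap is filled so that
 * mlx4_ib_steer_qp_alloc() below never hands out a QPN from it.
 */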
Matan Barak41966702014-02-02 17:06:47 +02002883 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2884 ib_num_ports) {
Matan Barakc1c98502013-11-07 15:25:17 +02002885 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2886 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2887 MLX4_IB_UC_STEER_QPN_ALIGN,
Moshe Shemeshf3301872017-06-21 09:29:36 +03002888 &ibdev->steer_qpn_base, 0,
2889 MLX4_RES_USAGE_DRIVER);
Matan Barakc1c98502013-11-07 15:25:17 +02002890 if (err)
2891 goto err_counter;
2892
2893 ibdev->ib_uc_qpns_bitmap =
2894 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2895 sizeof(long),
2896 GFP_KERNEL);
Leon Romanovsky15d46262016-11-03 16:44:12 +02002897 if (!ibdev->ib_uc_qpns_bitmap)
Matan Barakc1c98502013-11-07 15:25:17 +02002898 goto err_steer_qp_release;
Matan Barakc1c98502013-11-07 15:25:17 +02002899
Eran Ben Elisha1f22e452016-11-10 11:31:00 +02002900 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2901 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2902 ibdev->steer_qpn_count);
2903 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2904 dev, ibdev->steer_qpn_base,
2905 ibdev->steer_qpn_base +
2906 ibdev->steer_qpn_count - 1);
2907 if (err)
2908 goto err_steer_free_bitmap;
2909 } else {
2910 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2911 ibdev->steer_qpn_count);
2912 }
Matan Barakc1c98502013-11-07 15:25:17 +02002913 }
2914
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002915 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2916 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2917
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002918 if (mlx4_ib_alloc_diag_counters(ibdev))
Matan Barakc1c98502013-11-07 15:25:17 +02002919 goto err_steer_free_bitmap;
Roland Dreier225c7b12007-05-08 18:00:38 -07002920
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002921 if (ib_register_device(&ibdev->ib_dev, NULL))
2922 goto err_diag_counters;
2923
Roland Dreier225c7b12007-05-08 18:00:38 -07002924 if (mlx4_ib_mad_init(ibdev))
2925 goto err_reg;
2926
Jack Morgensteinfc065732012-08-03 08:40:42 +00002927 if (mlx4_ib_init_sriov(ibdev))
2928 goto err_mad;
2929
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002930 if (!iboe->nb.notifier_call) {
2931 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2932 err = register_netdevice_notifier(&iboe->nb);
2933 if (err) {
2934 iboe->nb.notifier_call = NULL;
2935 goto err_notif;
Moni Shouad487ee72013-12-12 18:03:13 +02002936 }
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002937 }
2938 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2939 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2940 if (err)
2941 goto err_notif;
Eli Cohenfa417f72010-10-24 21:08:52 -07002942 }
2943
Jack Morgenstein035b1032012-05-10 23:28:09 +03002944 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002945 if (device_create_file(&ibdev->ib_dev.dev,
Jack Morgenstein035b1032012-05-10 23:28:09 +03002946 mlx4_class_attributes[j]))
Eli Cohenfa417f72010-10-24 21:08:52 -07002947 goto err_notif;
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002948 }
2949
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002950 ibdev->ib_active = true;
Jiri Pirko09d4d082016-02-26 17:32:24 +01002951 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2952 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2953 &ibdev->ib_dev);
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002954
Jack Morgenstein54679e12012-08-03 08:40:43 +00002955 if (mlx4_is_mfunc(ibdev->dev))
2956 init_pkeys(ibdev);
2957
Jack Morgenstein3806d082012-08-03 08:40:58 +00002958 /* create paravirt contexts for any VFs which are active */
2959 if (mlx4_is_master(ibdev->dev)) {
2960 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2961 if (j == mlx4_master_func_num(ibdev->dev))
2962 continue;
2963 if (mlx4_is_slave_active(ibdev->dev, j))
2964 do_slave_init(ibdev, j, 1);
2965 }
2966 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002967 return ibdev;
2968
Eli Cohenfa417f72010-10-24 21:08:52 -07002969err_notif:
Moni Shouad487ee72013-12-12 18:03:13 +02002970 if (ibdev->iboe.nb.notifier_call) {
2971 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2972 pr_warn("failure unregistering notifier\n");
2973 ibdev->iboe.nb.notifier_call = NULL;
2974 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002975 flush_workqueue(wq);
2976
Jack Morgensteinfc065732012-08-03 08:40:42 +00002977 mlx4_ib_close_sriov(ibdev);
2978
2979err_mad:
2980 mlx4_ib_mad_cleanup(ibdev);
2981
Roland Dreier225c7b12007-05-08 18:00:38 -07002982err_reg:
2983 ib_unregister_device(&ibdev->ib_dev);
2984
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002985err_diag_counters:
2986 mlx4_ib_diag_cleanup(ibdev);
2987
Matan Barakc1c98502013-11-07 15:25:17 +02002988err_steer_free_bitmap:
2989 kfree(ibdev->ib_uc_qpns_bitmap);
2990
2991err_steer_qp_release:
2992 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2993 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2994 ibdev->steer_qpn_count);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002995err_counter:
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002996 for (i = 0; i < ibdev->num_ports; ++i)
2997 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2998
Roland Dreier225c7b12007-05-08 18:00:38 -07002999err_map:
Jack Morgenstein99e68902017-03-21 12:57:05 +02003000 mlx4_ib_free_eqs(dev, ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003001 iounmap(ibdev->uar_map);
3002
3003err_uar:
3004 mlx4_uar_free(dev, &ibdev->priv_uar);
3005
3006err_pd:
3007 mlx4_pd_free(dev, ibdev->priv_pdn);
3008
3009err_dealloc:
3010 ib_dealloc_device(&ibdev->ib_dev);
3011
3012 return NULL;
3013}
3014
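/*
 * mlx4_ib_steer_qp_alloc() carves a power-of-two aligned block of at least
 * @count QPNs out of the steering range reserved in mlx4_ib_add(), and
 * mlx4_ib_steer_qp_free() returns it. A minimal usage sketch (illustrative
 * only, not code taken from this driver):
 *
 *	int qpn;
 *
 *	if (!mlx4_ib_steer_qp_alloc(ibdev, 1, &qpn)) {
 *		... create the UC QP on qpn and attach steering rules ...
 *		mlx4_ib_steer_qp_free(ibdev, qpn, 1);
 *	}
 */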
Matan Barakc1c98502013-11-07 15:25:17 +02003015int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
3016{
3017 int offset;
3018
3019 WARN_ON(!dev->ib_uc_qpns_bitmap);
3020
3021 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
3022 dev->steer_qpn_count,
3023 get_count_order(count));
3024 if (offset < 0)
3025 return offset;
3026
3027 *qpn = dev->steer_qpn_base + offset;
3028 return 0;
3029}
3030
3031void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
3032{
3033 if (!qpn ||
3034 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
3035 return;
3036
3037 BUG_ON(qpn < dev->steer_qpn_base);
3038
3039 bitmap_release_region(dev->ib_uc_qpns_bitmap,
3040 qpn - dev->steer_qpn_base,
3041 get_count_order(count));
3042}
3043
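/* Attach or detach a catch-all IB L2 steering rule for the QP: on attach a
 * single-spec flow with an all-zero mask (match any IB traffic) is created
 * and its registration id saved in mqp->reg_id; on detach that rule is
 * destroyed.
 */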
3044int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
3045 int is_attach)
3046{
3047 int err;
3048 size_t flow_size;
3049 struct ib_flow_attr *flow = NULL;
3050 struct ib_flow_spec_ib *ib_spec;
3051
3052 if (is_attach) {
3053 flow_size = sizeof(struct ib_flow_attr) +
3054 sizeof(struct ib_flow_spec_ib);
3055 flow = kzalloc(flow_size, GFP_KERNEL);
3056 if (!flow)
3057 return -ENOMEM;
3058 flow->port = mqp->port;
3059 flow->num_of_specs = 1;
3060 flow->size = flow_size;
3061 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
3062 ib_spec->type = IB_FLOW_SPEC_IB;
3063 ib_spec->size = sizeof(struct ib_flow_spec_ib);
3064 /* Add an empty rule for IB L2 */
3065 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
3066
3067 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
3068 IB_FLOW_DOMAIN_NIC,
3069 MLX4_FS_REGULAR,
3070 &mqp->reg_id);
3071 } else {
3072 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
3073 }
3074 kfree(flow);
3075 return err;
3076}
3077
Roland Dreier225c7b12007-05-08 18:00:38 -07003078static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3079{
3080 struct mlx4_ib_dev *ibdev = ibdev_ptr;
3081 int p;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003082 int i;
Roland Dreier225c7b12007-05-08 18:00:38 -07003083
Jiri Pirko09d4d082016-02-26 17:32:24 +01003084 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3085 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
Moni Shoua4bf97152014-08-21 14:28:42 +03003086 ibdev->ib_active = false;
3087 flush_workqueue(wq);
3088
Jack Morgensteinfc065732012-08-03 08:40:42 +00003089 mlx4_ib_close_sriov(ibdev);
Yevgeny Petrilina6a47772009-03-18 19:49:54 -07003090 mlx4_ib_mad_cleanup(ibdev);
3091 ib_unregister_device(&ibdev->ib_dev);
Mark Bloch3f85f2a2016-07-19 20:54:58 +03003092 mlx4_ib_diag_cleanup(ibdev);
Eli Cohenfa417f72010-10-24 21:08:52 -07003093 if (ibdev->iboe.nb.notifier_call) {
3094 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03003095 pr_warn("failure unregistering notifier\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07003096 ibdev->iboe.nb.notifier_call = NULL;
3097 }
Matan Barakc1c98502013-11-07 15:25:17 +02003098
3099 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3100 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3101 ibdev->steer_qpn_count);
3102 kfree(ibdev->ib_uc_qpns_bitmap);
3103 }
3104
Eli Cohenfa417f72010-10-24 21:08:52 -07003105 iounmap(ibdev->uar_map);
Or Gerlitzcfcde112011-06-15 14:49:57 +00003106 for (p = 0; p < ibdev->num_ports; ++p)
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03003107 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3108
Eli Cohenfa417f72010-10-24 21:08:52 -07003109 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
Roland Dreier225c7b12007-05-08 18:00:38 -07003110 mlx4_CLOSE_PORT(dev, p);
3111
Shlomo Pongratze605b742012-04-29 17:04:27 +03003112 mlx4_ib_free_eqs(dev, ibdev);
3113
Roland Dreier225c7b12007-05-08 18:00:38 -07003114 mlx4_uar_free(dev, &ibdev->priv_uar);
3115 mlx4_pd_free(dev, ibdev->priv_pdn);
3116 ib_dealloc_device(&ibdev->ib_dev);
3117}
3118
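/* On the master, queue per-port work items that create (do_init = 1) or
 * tear down (do_init = 0) the paravirtual tunnel QPs for the given slave's
 * active ports; if SR-IOV teardown is already in progress the work items
 * are freed instead of queued.
 */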
Jack Morgensteinfc065732012-08-03 08:40:42 +00003119static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3120{
3121 struct mlx4_ib_demux_work **dm = NULL;
3122 struct mlx4_dev *dev = ibdev->dev;
3123 int i;
3124 unsigned long flags;
Matan Barak449fc482014-03-19 18:11:52 +02003125 struct mlx4_active_ports actv_ports;
3126 unsigned int ports;
3127 unsigned int first_port;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003128
3129 if (!mlx4_is_master(dev))
3130 return;
3131
Matan Barak449fc482014-03-19 18:11:52 +02003132 actv_ports = mlx4_get_active_ports(dev, slave);
3133 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3134 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3135
3136 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003137 if (!dm)
Maninder Singha39a98f2015-07-08 09:43:35 +05303138 return;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003139
Matan Barak449fc482014-03-19 18:11:52 +02003140 for (i = 0; i < ports; i++) {
Jack Morgensteinfc065732012-08-03 08:40:42 +00003141 dm[i] = kmalloc(sizeof(struct mlx4_ib_demux_work), GFP_ATOMIC);
3142 if (!dm[i]) {
Maninder Singha39a98f2015-07-08 09:43:35 +05303143 while (--i >= 0)
3144 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003145 goto out;
3146 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003147 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
Matan Barak449fc482014-03-19 18:11:52 +02003148 dm[i]->port = first_port + i + 1;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003149 dm[i]->slave = slave;
3150 dm[i]->do_init = do_init;
3151 dm[i]->dev = ibdev;
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003152 }
3153 /* initialize or tear down tunnel QPs for the slave */
3154 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3155 if (!ibdev->sriov.is_going_down) {
3156 for (i = 0; i < ports; i++)
Jack Morgensteinfc065732012-08-03 08:40:42 +00003157 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3158 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003159 } else {
3160 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3161 for (i = 0; i < ports; i++)
3162 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003163 }
3164out:
Syam Sidhardhanc89d1272013-02-24 23:20:05 +00003165 kfree(dm);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003166 return;
3167}
3168
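/* On a catastrophic device error, walk every QP on this ibdev and, for any
 * send or receive queue that still has outstanding work, trigger a
 * completion event on its CQ exactly once so consumers can reap the
 * flushed completions.
 */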
Yishai Hadas35f05da2015-02-08 11:49:34 +02003169static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3170{
3171 struct mlx4_ib_qp *mqp;
3172 unsigned long flags_qp;
3173 unsigned long flags_cq;
3174 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3175 struct list_head cq_notify_list;
3176 struct mlx4_cq *mcq;
3177 unsigned long flags;
3178
3179 pr_warn("mlx4_ib_handle_catas_error was started\n");
3180 INIT_LIST_HEAD(&cq_notify_list);
3181
3182 /* Go over the qp list residing on this ibdev, syncing with qp create/destroy. */
3183 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3184
3185 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3186 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3187 if (mqp->sq.tail != mqp->sq.head) {
3188 send_mcq = to_mcq(mqp->ibqp.send_cq);
3189 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3190 if (send_mcq->mcq.comp &&
3191 mqp->ibqp.send_cq->comp_handler) {
3192 if (!send_mcq->mcq.reset_notify_added) {
3193 send_mcq->mcq.reset_notify_added = 1;
3194 list_add_tail(&send_mcq->mcq.reset_notify,
3195 &cq_notify_list);
3196 }
3197 }
3198 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3199 }
3200 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3201 /* Now, handle the QP's receive queue */
3202 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3203 /* no handling is needed for SRQ */
3204 if (!mqp->ibqp.srq) {
3205 if (mqp->rq.tail != mqp->rq.head) {
3206 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3207 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3208 if (recv_mcq->mcq.comp &&
3209 mqp->ibqp.recv_cq->comp_handler) {
3210 if (!recv_mcq->mcq.reset_notify_added) {
3211 recv_mcq->mcq.reset_notify_added = 1;
3212 list_add_tail(&recv_mcq->mcq.reset_notify,
3213 &cq_notify_list);
3214 }
3215 }
3216 spin_unlock_irqrestore(&recv_mcq->lock,
3217 flags_cq);
3218 }
3219 }
3220 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3221 }
3222
3223 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3224 mcq->comp(mcq);
3225 }
3226 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3227 pr_warn("mlx4_ib_handle_catas_error ended\n");
3228}
3229
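/* Under bonding the physical ports are reported to consumers as a single
 * IB port (port 1). The bonded port is considered ACTIVE if at least one
 * slave netdev is running with link up; otherwise a PORT_ERR event is
 * dispatched.
 */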
Moni Shouaa5750092015-02-03 16:48:37 +02003230static void handle_bonded_port_state_event(struct work_struct *work)
3231{
3232 struct ib_event_work *ew =
3233 container_of(work, struct ib_event_work, work);
3234 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3235 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3236 int i;
3237 struct ib_event ibev;
3238
3239 kfree(ew);
3240 spin_lock_bh(&ibdev->iboe.lock);
3241 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3242 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
Moni Shoua217e8b12015-03-18 16:51:35 +02003243 enum ib_port_state curr_port_state;
Moni Shouaa5750092015-02-03 16:48:37 +02003244
Moni Shoua217e8b12015-03-18 16:51:35 +02003245 if (!curr_netdev)
3246 continue;
3247
3248 curr_port_state =
Moni Shouaa5750092015-02-03 16:48:37 +02003249 (netif_running(curr_netdev) &&
3250 netif_carrier_ok(curr_netdev)) ?
3251 IB_PORT_ACTIVE : IB_PORT_DOWN;
3252
3253 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3254 curr_port_state : IB_PORT_ACTIVE;
3255 }
3256 spin_unlock_bh(&ibdev->iboe.lock);
3257
3258 ibev.device = &ibdev->ib_dev;
3259 ibev.element.port_num = 1;
3260 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3261 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3262
3263 ib_dispatch_event(&ibev);
3264}
3265
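/* Query the port's current SL-to-VL mapping and cache it in mdev->sl2vl;
 * if the query fails, an all-zero mapping is cached instead.
 */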
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003266void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3267{
3268 u64 sl2vl;
3269 int err;
3270
3271 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3272 if (err) {
3273 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3274 port, err);
3275 sl2vl = 0;
3276 }
3277 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3278}
3279
3280static void ib_sl2vl_update_work(struct work_struct *work)
3281{
3282 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3283 struct mlx4_ib_dev *mdev = ew->ib_dev;
3284 int port = ew->port;
3285
3286 mlx4_ib_sl2vl_update(mdev, port);
3287
3288 kfree(ew);
3289}
3290
3291void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3292 int port)
3293{
3294 struct ib_event_work *ew;
3295
3296 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3297 if (ew) {
3298 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3299 ew->port = port;
3300 ew->ib_dev = ibdev;
3301 queue_work(wq, &ew->work);
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003302 }
3303}
3304
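/* Translate mlx4 core events into IB events: port up/down, catastrophic
 * error, port-management changes and slave init/shutdown. Bonded devices
 * route port up/down through handle_bonded_port_state_event() instead.
 */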
Roland Dreier225c7b12007-05-08 18:00:38 -07003305static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003306 enum mlx4_dev_event event, unsigned long param)
Roland Dreier225c7b12007-05-08 18:00:38 -07003307{
3308 struct ib_event ibev;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003309 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003310 struct mlx4_eqe *eqe = NULL;
3311 struct ib_event_work *ew;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003312 int p = 0;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003313
Moni Shouaa5750092015-02-03 16:48:37 +02003314 if (mlx4_is_bonded(dev) &&
3315 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3316 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3317 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3318 if (!ew)
3319 return;
3320 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3321 ew->ib_dev = ibdev;
3322 queue_work(wq, &ew->work);
3323 return;
3324 }
3325
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003326 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3327 eqe = (struct mlx4_eqe *)param;
3328 else
Jack Morgensteinfc065732012-08-03 08:40:42 +00003329 p = (int) param;
Roland Dreier225c7b12007-05-08 18:00:38 -07003330
3331 switch (event) {
Roland Dreier37608ee2008-04-16 21:01:08 -07003332 case MLX4_DEV_EVENT_PORT_UP:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003333 if (p > ibdev->num_ports)
3334 return;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003335 if (!mlx4_is_slave(dev) &&
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003336 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3337 IB_LINK_LAYER_INFINIBAND) {
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003338 if (mlx4_is_master(dev))
3339 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3340 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3341 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3342 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003343 }
Roland Dreier37608ee2008-04-16 21:01:08 -07003344 ibev.event = IB_EVENT_PORT_ACTIVE;
Roland Dreier225c7b12007-05-08 18:00:38 -07003345 break;
3346
Roland Dreier37608ee2008-04-16 21:01:08 -07003347 case MLX4_DEV_EVENT_PORT_DOWN:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003348 if (p > ibdev->num_ports)
3349 return;
Roland Dreier37608ee2008-04-16 21:01:08 -07003350 ibev.event = IB_EVENT_PORT_ERR;
3351 break;
3352
3353 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07003354 ibdev->ib_active = false;
Roland Dreier225c7b12007-05-08 18:00:38 -07003355 ibev.event = IB_EVENT_DEVICE_FATAL;
Yishai Hadas35f05da2015-02-08 11:49:34 +02003356 mlx4_ib_handle_catas_error(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003357 break;
3358
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003359 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3360 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003361 if (!ew)
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003362 break;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003363
3364 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3365 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3366 ew->ib_dev = ibdev;
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00003367 /* need to queue only for port owner, which uses GEN_EQE */
3368 if (mlx4_is_master(dev))
3369 queue_work(wq, &ew->work);
3370 else
3371 handle_port_mgmt_change_event(&ew->work);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003372 return;
3373
Jack Morgensteinfc065732012-08-03 08:40:42 +00003374 case MLX4_DEV_EVENT_SLAVE_INIT:
3375 /* here, p is the slave id */
3376 do_slave_init(ibdev, p, 1);
Yishai Hadasee59fa02015-03-03 17:28:49 +02003377 if (mlx4_is_master(dev)) {
3378 int i;
3379
3380 for (i = 1; i <= ibdev->num_ports; i++) {
3381 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3382 == IB_LINK_LAYER_INFINIBAND)
3383 mlx4_ib_slave_alias_guid_event(ibdev,
3384 p, i,
3385 1);
3386 }
3387 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003388 return;
3389
3390 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
Yishai Hadasee59fa02015-03-03 17:28:49 +02003391 if (mlx4_is_master(dev)) {
3392 int i;
3393
3394 for (i = 1; i <= ibdev->num_ports; i++) {
3395 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3396 == IB_LINK_LAYER_INFINIBAND)
3397 mlx4_ib_slave_alias_guid_event(ibdev,
3398 p, i,
3399 0);
3400 }
3401 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003402 /* here, p is the slave id */
3403 do_slave_init(ibdev, p, 0);
3404 return;
3405
Roland Dreier225c7b12007-05-08 18:00:38 -07003406 default:
3407 return;
3408 }
3409
3410 ibev.device = ibdev_ptr;
Moni Shouaa5750092015-02-03 16:48:37 +02003411 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
Roland Dreier225c7b12007-05-08 18:00:38 -07003412
3413 ib_dispatch_event(&ibev);
3414}
3415
3416static struct mlx4_interface mlx4_ib_interface = {
Eli Cohenfa417f72010-10-24 21:08:52 -07003417 .add = mlx4_ib_add,
3418 .remove = mlx4_ib_remove,
3419 .event = mlx4_ib_event,
Moni Shouaa5750092015-02-03 16:48:37 +02003420 .protocol = MLX4_PROT_IB_IPV6,
3421 .flags = MLX4_INTFF_BONDING
Roland Dreier225c7b12007-05-08 18:00:38 -07003422};
3423
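/* Module init: create the ordered workqueue used by the driver, bring up
 * the multicast group (mcg) demux machinery, then register with the mlx4
 * core; mlx4_ib_cleanup() undoes these steps in reverse order.
 */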
3424static int __init mlx4_ib_init(void)
3425{
Eli Cohenfa417f72010-10-24 21:08:52 -07003426 int err;
3427
Bhaktipriya Shridhar41cd3942016-08-15 23:42:48 +05303428 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
Eli Cohenfa417f72010-10-24 21:08:52 -07003429 if (!wq)
3430 return -ENOMEM;
3431
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003432 err = mlx4_ib_mcg_init();
3433 if (err)
3434 goto clean_wq;
3435
Eli Cohenfa417f72010-10-24 21:08:52 -07003436 err = mlx4_register_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003437 if (err)
3438 goto clean_mcg;
Eli Cohenfa417f72010-10-24 21:08:52 -07003439
3440 return 0;
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003441
3442clean_mcg:
3443 mlx4_ib_mcg_destroy();
3444
3445clean_wq:
3446 destroy_workqueue(wq);
3447 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07003448}
3449
3450static void __exit mlx4_ib_cleanup(void)
3451{
3452 mlx4_unregister_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003453 mlx4_ib_mcg_destroy();
Eli Cohenfa417f72010-10-24 21:08:52 -07003454 destroy_workqueue(wq);
Roland Dreier225c7b12007-05-08 18:00:38 -07003455}
3456
3457module_init(mlx4_ib_init);
3458module_exit(mlx4_ib_cleanup);