/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

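/*
 * Device-managed flow steering (DMFS) is only reported as supported when
 * every port type in use has the matching firmware capability: FS_EN for
 * Ethernet ports and DMFS_IPOIB for IB ports, and the device is not a
 * multi-function (SR-IOV) device with IB ports.
 */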
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

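/*
 * Resolve the netdev backing an Ethernet port. Runs under RCU; when the
 * ports are bonded, walk up to the bond master and return its currently
 * active slave instead. A reference is taken on the returned netdev.
 */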
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

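/*
 * Push the whole software GID cache for a port to the HCA with a SET_PORT
 * command, using the legacy (RoCE v1 only) GID table layout. When the
 * ports are bonded the same table is mirrored to port 2 as well.
 */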
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

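/*
 * Program the port GID table in the format the firmware expects: the
 * extended v1/v2 layout when the ROCE_V1_V2 capability is set, otherwise
 * the plain v1 layout.
 */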
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

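/*
 * Add a GID to the per-port software cache. An existing matching entry
 * (same GID and GID type) only gets its refcount bumped; otherwise the
 * first free slot is claimed and the full table is re-written to the
 * hardware outside the spinlock.
 */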
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

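/*
 * Drop a reference on a cached GID entry; when the refcount hits zero the
 * slot is zeroed and the updated table is flushed to the hardware.
 */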
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

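/*
 * Translate a GID index from the rdma core's cache into the index the
 * hardware actually uses for this port (they can differ once entries have
 * been added and removed). Returns -EINVAL if the GID is not found.
 */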
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

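/*
 * ib_query_device() handler. Device attributes come from a NODE_INFO MAD
 * plus the cached mlx4 capabilities; the extended uverbs path can also
 * return the HCA core clock offset and the maximum inline receive size
 * when the user buffer is large enough.
 */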
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if ((dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
	    (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	     mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET)) {
		props->rss_caps.max_rwq_indirection_tables = props->max_qp;
		props->rss_caps.max_rwq_indirection_table_size =
			dev->dev->caps.max_rss_tbl_sz;
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq = props->max_qp;
	}

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

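/*
 * Port attributes for an Ethernet (RoCE) port are synthesized from a
 * QUERY_PORT command and the state of the underlying netdev: link speed
 * and width come from the firmware, while state and active MTU track the
 * netdev (or the bond master when the ports are bonded).
 */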
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
			      (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
			      IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
			      IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

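/*
 * Read the SL-to-VL mapping for a port via an SL_TO_VL_TABLE MAD and pack
 * the eight entries into a single u64. Slaves report an all-zero mapping.
 */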
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

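/*
 * Only IB_DEVICE_MODIFY_NODE_DESC is supported: the new node description
 * is copied under sm_lock and, when possible, also handed to the firmware
 * with SET_NODE so it can generate a trap 144.
 */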
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page and that there will
	 * be no "splitting" operations on it.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, we do need to
	 * sync with accessing the vma as part of mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * the process is exiting. The exiting case is handled explicitly as
	 * part of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in
	 * mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

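/*
 * Disassociate a ucontext from its user mappings (e.g. on device removal):
 * if the owning task's mm is already gone, wait for the task to die;
 * otherwise zap the PTEs of every mapped HW BAR VMA and clear its vm_ops
 * so the driver is never called back for those mappings.
 */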
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that task is dead before returning, it may
			 * prevent a rare case of module down in parallel to a
			 * call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_write(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		context->hw_bar_info[i].vma->vm_flags &=
			~(VM_SHARED | VM_MAYSHARE);
		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_write(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

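/*
 * mmap offsets understood by the driver (one page each):
 *   pgoff 0 - the context's UAR doorbell page (non-cached)
 *   pgoff 1 - the UAR BlueFlame page (write-combining), if bf_reg_size != 0
 *   pgoff 3 - the internal HCA core clock page (non-cached)
 * Each BAR may only be mapped once per context.
 */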
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

Maor Gottlieb1f02a092016-08-30 16:58:30 +03001453#define LAST_ETH_FIELD vlan_tag
1454#define LAST_IB_FIELD sl
1455#define LAST_IPV4_FIELD dst_ip
1456#define LAST_TCP_UDP_FIELD src_port
1457
 1458/* 'field' is the last supported field; everything after it must be zero */
1459#define FIELDS_NOT_SUPPORTED(filter, field)\
1460 memchr_inv((void *)&filter.field +\
1461 sizeof(filter.field), 0,\
1462 sizeof(filter) -\
1463 offsetof(typeof(filter), field) -\
1464 sizeof(filter.field))
1465
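/*
 * Translate a single ib_flow_spec into the firmware _rule_hw layout.
 * Returns the hardware rule size in bytes, -ENOTSUPP when the spec
 * masks fields beyond the supported ones, or -EINVAL for an unknown
 * spec type.
 */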
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001466static int parse_flow_attr(struct mlx4_dev *dev,
Matan Baraka37a1a42013-11-07 15:25:16 +02001467 u32 qp_num,
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001468 union ib_flow_spec *ib_spec,
1469 struct _rule_hw *mlx4_spec)
1470{
1471 enum mlx4_net_trans_rule_id type;
1472
1473 switch (ib_spec->type) {
1474 case IB_FLOW_SPEC_ETH:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001475 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1476 return -ENOTSUPP;
1477
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001478 type = MLX4_NET_TRANS_RULE_ID_ETH;
1479 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1480 ETH_ALEN);
1481 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1482 ETH_ALEN);
1483 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1484 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1485 break;
Matan Baraka37a1a42013-11-07 15:25:16 +02001486 case IB_FLOW_SPEC_IB:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001487 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1488 return -ENOTSUPP;
1489
Matan Baraka37a1a42013-11-07 15:25:16 +02001490 type = MLX4_NET_TRANS_RULE_ID_IB;
1491 mlx4_spec->ib.l3_qpn =
1492 cpu_to_be32(qp_num);
1493 mlx4_spec->ib.qpn_mask =
1494 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1495 break;
1496
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001497
1498 case IB_FLOW_SPEC_IPV4:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001499 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1500 return -ENOTSUPP;
1501
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001502 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1503 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1504 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1505 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1506 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1507 break;
1508
1509 case IB_FLOW_SPEC_TCP:
1510 case IB_FLOW_SPEC_UDP:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001511 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1512 return -ENOTSUPP;
1513
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001514 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1515 MLX4_NET_TRANS_RULE_ID_TCP :
1516 MLX4_NET_TRANS_RULE_ID_UDP;
1517 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1518 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1519 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1520 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1521 break;
1522
1523 default:
1524 return -EINVAL;
1525 }
1526 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1527 mlx4_hw_rule_sz(dev, type) < 0)
1528 return -EINVAL;
1529 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1530 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1531 return mlx4_hw_rule_sz(dev, type);
1532}
1533
Matan Baraka37a1a42013-11-07 15:25:16 +02001534struct default_rules {
1535 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1536 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1537 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1538 __u8 link_layer;
1539};
1540static const struct default_rules default_table[] = {
1541 {
1542 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1543 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1544 .rules_create_list = {IB_FLOW_SPEC_IB},
1545 .link_layer = IB_LINK_LAYER_INFINIBAND
1546 }
1547};
1548
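/*
 * Find the default_table entry whose mandatory (and mandatory-not)
 * spec layout matches this flow attribute; returns its index, or -1
 * when no default rule applies.
 */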
1549static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1550 struct ib_flow_attr *flow_attr)
1551{
1552 int i, j, k;
1553 void *ib_flow;
1554 const struct default_rules *pdefault_rules = default_table;
1555 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1556
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001557 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001558 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1559 memset(&field_types, 0, sizeof(field_types));
1560
1561 if (link_layer != pdefault_rules->link_layer)
1562 continue;
1563
1564 ib_flow = flow_attr + 1;
1565 /* we assume the specs are sorted */
1566 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1567 j < flow_attr->num_of_specs; k++) {
1568 union ib_flow_spec *current_flow =
1569 (union ib_flow_spec *)ib_flow;
1570
1571 /* same layer but different type */
1572 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1573 (pdefault_rules->mandatory_fields[k] &
1574 IB_FLOW_SPEC_LAYER_MASK)) &&
1575 (current_flow->type !=
1576 pdefault_rules->mandatory_fields[k]))
1577 goto out;
1578
1579 /* same layer, try match next one */
1580 if (current_flow->type ==
1581 pdefault_rules->mandatory_fields[k]) {
1582 j++;
1583 ib_flow +=
1584 ((union ib_flow_spec *)ib_flow)->size;
1585 }
1586 }
1587
1588 ib_flow = flow_attr + 1;
1589 for (j = 0; j < flow_attr->num_of_specs;
1590 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1591 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1592 /* same layer and same type */
1593 if (((union ib_flow_spec *)ib_flow)->type ==
1594 pdefault_rules->mandatory_not_fields[k])
1595 goto out;
1596
1597 return i;
1598 }
1599out:
1600 return -1;
1601}
1602
1603static int __mlx4_ib_create_default_rules(
1604 struct mlx4_ib_dev *mdev,
1605 struct ib_qp *qp,
1606 const struct default_rules *pdefault_rules,
1607 struct _rule_hw *mlx4_spec) {
1608 int size = 0;
1609 int i;
1610
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001611 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001612 int ret;
1613 union ib_flow_spec ib_spec;
1614 switch (pdefault_rules->rules_create_list[i]) {
1615 case 0:
1616 /* no rule */
1617 continue;
1618 case IB_FLOW_SPEC_IB:
1619 ib_spec.type = IB_FLOW_SPEC_IB;
1620 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1621
1622 break;
1623 default:
1624 /* invalid rule */
1625 return -EINVAL;
1626 }
 1627 /* We must put an empty rule here; the qpn is ignored */
1628 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1629 mlx4_spec);
1630 if (ret < 0) {
1631 pr_info("invalid parsing\n");
1632 return -EINVAL;
1633 }
1634
1635 mlx4_spec = (void *)mlx4_spec + ret;
1636 size += ret;
1637 }
1638 return size;
1639}
1640
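/*
 * Build the complete steering rule in a command mailbox (control
 * segment, any matching default rules, then the user-supplied specs)
 * and attach it with MLX4_QP_FLOW_STEERING_ATTACH.  *reg_id receives
 * the handle used later to detach the rule.
 */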
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001641static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1642 int domain,
1643 enum mlx4_net_trans_promisc_mode flow_type,
1644 u64 *reg_id)
1645{
1646 int ret, i;
1647 int size = 0;
1648 void *ib_flow;
1649 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1650 struct mlx4_cmd_mailbox *mailbox;
1651 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
Matan Baraka37a1a42013-11-07 15:25:16 +02001652 int default_flow;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001653
1654 static const u16 __mlx4_domain[] = {
1655 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1656 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1657 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1658 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1659 };
1660
1661 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1662 pr_err("Invalid priority value %d\n", flow_attr->priority);
1663 return -EINVAL;
1664 }
1665
1666 if (domain >= IB_FLOW_DOMAIN_NUM) {
1667 pr_err("Invalid domain value %d\n", domain);
1668 return -EINVAL;
1669 }
1670
1671 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1672 return -EINVAL;
1673
1674 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1675 if (IS_ERR(mailbox))
1676 return PTR_ERR(mailbox);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001677 ctrl = mailbox->buf;
1678
1679 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1680 flow_attr->priority);
1681 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1682 ctrl->port = flow_attr->port;
1683 ctrl->qpn = cpu_to_be32(qp->qp_num);
1684
1685 ib_flow = flow_attr + 1;
1686 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
Matan Baraka37a1a42013-11-07 15:25:16 +02001687 /* Add default flows */
1688 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1689 if (default_flow >= 0) {
1690 ret = __mlx4_ib_create_default_rules(
1691 mdev, qp, default_table + default_flow,
1692 mailbox->buf + size);
1693 if (ret < 0) {
1694 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1695 return -EINVAL;
1696 }
1697 size += ret;
1698 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001699 for (i = 0; i < flow_attr->num_of_specs; i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001700 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1701 mailbox->buf + size);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001702 if (ret < 0) {
1703 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1704 return -EINVAL;
1705 }
1706 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1707 size += ret;
1708 }
1709
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001710 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1711 flow_attr->num_of_specs == 1) {
1712 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1713 enum ib_flow_spec_type header_spec =
1714 ((union ib_flow_spec *)(flow_attr + 1))->type;
1715
1716 if (header_spec == IB_FLOW_SPEC_ETH)
1717 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1718 }
1719
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001720 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1721 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001722 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001723 if (ret == -ENOMEM)
1724 pr_err("mcg table is full. Fail to register network rule.\n");
1725 else if (ret == -ENXIO)
1726 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1727 else if (ret)
Colin Ian King35fc7b72016-04-25 20:26:50 +01001728 pr_err("Invalid argument. Fail to register network rule.\n");
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001729
1730 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1731 return ret;
1732}
1733
1734static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1735{
1736 int err;
1737 err = mlx4_cmd(dev, reg_id, 0, 0,
1738 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001739 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001740 if (err)
1741 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1742 reg_id);
1743 return err;
1744}
1745
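/*
 * When VXLAN tunnel offload is active (and DMFS is not in static A0
 * mode), duplicate a single-spec Ethernet rule into the tunnel
 * steering tables; otherwise this is a no-op.
 */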
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001746static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1747 u64 *reg_id)
1748{
1749 void *ib_flow;
1750 union ib_flow_spec *ib_spec;
1751 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1752 int err = 0;
1753
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001754 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1755 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001756 return 0; /* do nothing */
1757
1758 ib_flow = flow_attr + 1;
1759 ib_spec = (union ib_flow_spec *)ib_flow;
1760
1761 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1762 return 0; /* do nothing */
1763
1764 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1765 flow_attr->port, qp->qp_num,
1766 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1767 reg_id);
1768 return err;
1769}
1770
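/*
 * Translate an IB_FLOW_ATTR_FLAGS_DONT_TRAP rule into the MC/UC
 * sniffer promiscuous modes, chosen from the destination MAC mask when
 * a spec is present.
 */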
Marina Varshaver0e451e82016-02-18 18:31:06 +02001771static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1772 struct ib_flow_attr *flow_attr,
1773 enum mlx4_net_trans_promisc_mode *type)
1774{
1775 int err = 0;
1776
1777 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1778 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1779 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1780 return -EOPNOTSUPP;
1781 }
1782
1783 if (flow_attr->num_of_specs == 0) {
1784 type[0] = MLX4_FS_MC_SNIFFER;
1785 type[1] = MLX4_FS_UC_SNIFFER;
1786 } else {
1787 union ib_flow_spec *ib_spec;
1788
1789 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1790 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1791 return -EINVAL;
1792
 1793 /* if the mask is all zeros, sniff both MC and UC */
1794 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1795 type[0] = MLX4_FS_MC_SNIFFER;
1796 type[1] = MLX4_FS_UC_SNIFFER;
1797 } else {
1798 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1799 ib_spec->eth.mask.dst_mac[1],
1800 ib_spec->eth.mask.dst_mac[2],
1801 ib_spec->eth.mask.dst_mac[3],
1802 ib_spec->eth.mask.dst_mac[4],
1803 ib_spec->eth.mask.dst_mac[5]};
1804
 1805 /* The XOR above touched only the MC bit; a non-empty mask
 1806 * is valid only if that bit is set and the rest are zero.
1807 */
1808 if (!is_zero_ether_addr(&mac[0]))
1809 return -EINVAL;
1810
1811 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1812 type[0] = MLX4_FS_MC_SNIFFER;
1813 else
1814 type[0] = MLX4_FS_UC_SNIFFER;
1815 }
1816 }
1817
1818 return err;
1819}
1820
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001821static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1822 struct ib_flow_attr *flow_attr,
1823 int domain)
1824{
Moni Shoua146d6e12015-02-03 16:48:38 +02001825 int err = 0, i = 0, j = 0;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001826 struct mlx4_ib_flow *mflow;
1827 enum mlx4_net_trans_promisc_mode type[2];
Moni Shoua146d6e12015-02-03 16:48:38 +02001828 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1829 int is_bonded = mlx4_is_bonded(dev);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001830
Yishai Hadas5533c182016-06-22 17:27:30 +03001831 if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1832 return ERR_PTR(-EINVAL);
1833
Marina Varshaver0e451e82016-02-18 18:31:06 +02001834 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1835 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
Marina Varshavera3100a72016-02-18 18:31:05 +02001836 return ERR_PTR(-EOPNOTSUPP);
1837
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001838 memset(type, 0, sizeof(type));
1839
1840 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1841 if (!mflow) {
1842 err = -ENOMEM;
1843 goto err_free;
1844 }
1845
1846 switch (flow_attr->type) {
1847 case IB_FLOW_ATTR_NORMAL:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001848 /* If the don't-trap flag (continue match) is set, under specific
 1849 * conditions traffic is replicated to the given qp
 1850 * without being stolen from it
1851 */
1852 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1853 err = mlx4_ib_add_dont_trap_rule(dev,
1854 flow_attr,
1855 type);
1856 if (err)
1857 goto err_free;
1858 } else {
1859 type[0] = MLX4_FS_REGULAR;
1860 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001861 break;
1862
1863 case IB_FLOW_ATTR_ALL_DEFAULT:
1864 type[0] = MLX4_FS_ALL_DEFAULT;
1865 break;
1866
1867 case IB_FLOW_ATTR_MC_DEFAULT:
1868 type[0] = MLX4_FS_MC_DEFAULT;
1869 break;
1870
1871 case IB_FLOW_ATTR_SNIFFER:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001872 type[0] = MLX4_FS_MIRROR_RX_PORT;
1873 type[1] = MLX4_FS_MIRROR_SX_PORT;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001874 break;
1875
1876 default:
1877 err = -EINVAL;
1878 goto err_free;
1879 }
1880
1881 while (i < ARRAY_SIZE(type) && type[i]) {
1882 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
Moni Shoua146d6e12015-02-03 16:48:38 +02001883 &mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001884 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001885 goto err_create_flow;
Moni Shoua146d6e12015-02-03 16:48:38 +02001886 if (is_bonded) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001887 /* The application always sees one port, so the mirror rule
 1888 * must be created on port #2
1889 */
Moni Shoua146d6e12015-02-03 16:48:38 +02001890 flow_attr->port = 2;
1891 err = __mlx4_ib_create_flow(qp, flow_attr,
1892 domain, type[j],
1893 &mflow->reg_id[j].mirror);
1894 flow_attr->port = 1;
1895 if (err)
1896 goto err_create_flow;
1897 j++;
1898 }
1899
Roland Dreier11562562015-05-29 23:11:27 -07001900 i++;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001901 }
1902
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001903 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001904 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1905 &mflow->reg_id[i].id);
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001906 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001907 goto err_create_flow;
Roland Dreier11562562015-05-29 23:11:27 -07001908
Moni Shoua146d6e12015-02-03 16:48:38 +02001909 if (is_bonded) {
1910 flow_attr->port = 2;
1911 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1912 &mflow->reg_id[j].mirror);
1913 flow_attr->port = 1;
1914 if (err)
1915 goto err_create_flow;
1916 j++;
1917 }
 1918 /* account for the tunnel steering rule created above */
Roland Dreier11562562015-05-29 23:11:27 -07001919 i++;
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001920 }
1921
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001922 return &mflow->ibflow;
1923
Or Gerlitz571e1b22014-10-30 15:59:28 +02001924err_create_flow:
1925 while (i) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001926 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1927 mflow->reg_id[i].id);
Or Gerlitz571e1b22014-10-30 15:59:28 +02001928 i--;
1929 }
Moni Shoua146d6e12015-02-03 16:48:38 +02001930
1931 while (j) {
1932 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1933 mflow->reg_id[j].mirror);
1934 j--;
1935 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001936err_free:
1937 kfree(mflow);
1938 return ERR_PTR(err);
1939}
1940
1941static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1942{
1943 int err, ret = 0;
1944 int i = 0;
1945 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1946 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1947
Moni Shoua146d6e12015-02-03 16:48:38 +02001948 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1949 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001950 if (err)
1951 ret = err;
Moni Shoua146d6e12015-02-03 16:48:38 +02001952 if (mflow->reg_id[i].mirror) {
1953 err = __mlx4_ib_destroy_flow(mdev->dev,
1954 mflow->reg_id[i].mirror);
1955 if (err)
1956 ret = err;
1957 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001958 i++;
1959 }
1960
1961 kfree(mflow);
1962 return ret;
1963}
1964
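/*
 * Attach the QP to a multicast group.  With device-managed steering
 * the returned reg_id is kept on the QP's steering_rules list so
 * detach can find it; bonded devices also attach a mirror on the
 * other port.
 */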
Roland Dreier225c7b12007-05-08 18:00:38 -07001965static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1966{
Eli Cohenfa417f72010-10-24 21:08:52 -07001967 int err;
1968 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001969 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001970 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001971 struct mlx4_ib_steering *ib_steering = NULL;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001972 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Moni Shoua146d6e12015-02-03 16:48:38 +02001973 struct mlx4_flow_reg_id reg_id;
Eli Cohenfa417f72010-10-24 21:08:52 -07001974
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001975 if (mdev->dev->caps.steering_mode ==
1976 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1977 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1978 if (!ib_steering)
1979 return -ENOMEM;
1980 }
1981
1982 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1983 !!(mqp->flags &
1984 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
Moni Shoua146d6e12015-02-03 16:48:38 +02001985 prot, &reg_id.id);
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001986 if (err) {
1987 pr_err("multicast attach op failed, err %d\n", err);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001988 goto err_malloc;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001989 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001990
Moni Shoua146d6e12015-02-03 16:48:38 +02001991 reg_id.mirror = 0;
1992 if (mlx4_is_bonded(dev)) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001993 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1994 (mqp->port == 1) ? 2 : 1,
Moni Shoua146d6e12015-02-03 16:48:38 +02001995 !!(mqp->flags &
1996 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1997 prot, &reg_id.mirror);
1998 if (err)
1999 goto err_add;
2000 }
2001
Eli Cohenfa417f72010-10-24 21:08:52 -07002002 err = add_gid_entry(ibqp, gid);
2003 if (err)
2004 goto err_add;
2005
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002006 if (ib_steering) {
2007 memcpy(ib_steering->gid.raw, gid->raw, 16);
2008 ib_steering->reg_id = reg_id;
2009 mutex_lock(&mqp->mutex);
2010 list_add(&ib_steering->list, &mqp->steering_rules);
2011 mutex_unlock(&mqp->mutex);
2012 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002013 return 0;
2014
2015err_add:
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002016 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02002017 prot, reg_id.id);
2018 if (reg_id.mirror)
2019 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
2020 prot, reg_id.mirror);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002021err_malloc:
2022 kfree(ib_steering);
2023
Eli Cohenfa417f72010-10-24 21:08:52 -07002024 return err;
2025}
2026
2027static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
2028{
2029 struct mlx4_ib_gid_entry *ge;
2030 struct mlx4_ib_gid_entry *tmp;
2031 struct mlx4_ib_gid_entry *ret = NULL;
2032
2033 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
2034 if (!memcmp(raw, ge->gid.raw, 16)) {
2035 ret = ge;
2036 break;
2037 }
2038 }
2039
2040 return ret;
Roland Dreier225c7b12007-05-08 18:00:38 -07002041}
2042
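/*
 * Undo mlx4_ib_mcg_attach(): look up the reg_id saved for this MGID
 * (device-managed steering), detach the group and its bonded mirror,
 * and drop the gid_list entry.
 */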
2043static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2044{
Eli Cohenfa417f72010-10-24 21:08:52 -07002045 int err;
2046 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02002047 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002048 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Eli Cohenfa417f72010-10-24 21:08:52 -07002049 struct net_device *ndev;
2050 struct mlx4_ib_gid_entry *ge;
Moni Shoua146d6e12015-02-03 16:48:38 +02002051 struct mlx4_flow_reg_id reg_id = {0, 0};
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002052 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Eli Cohenfa417f72010-10-24 21:08:52 -07002053
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002054 if (mdev->dev->caps.steering_mode ==
2055 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2056 struct mlx4_ib_steering *ib_steering;
2057
2058 mutex_lock(&mqp->mutex);
2059 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
2060 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
2061 list_del(&ib_steering->list);
2062 break;
2063 }
2064 }
2065 mutex_unlock(&mqp->mutex);
2066 if (&ib_steering->list == &mqp->steering_rules) {
2067 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
2068 return -EINVAL;
2069 }
2070 reg_id = ib_steering->reg_id;
2071 kfree(ib_steering);
2072 }
2073
2074 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02002075 prot, reg_id.id);
Eli Cohenfa417f72010-10-24 21:08:52 -07002076 if (err)
2077 return err;
2078
Moni Shoua146d6e12015-02-03 16:48:38 +02002079 if (mlx4_is_bonded(dev)) {
2080 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
2081 prot, reg_id.mirror);
2082 if (err)
2083 return err;
2084 }
2085
Eli Cohenfa417f72010-10-24 21:08:52 -07002086 mutex_lock(&mqp->mutex);
2087 ge = find_gid_entry(mqp, gid->raw);
2088 if (ge) {
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002089 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07002090 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
2091 if (ndev)
2092 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002093 spin_unlock_bh(&mdev->iboe.lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002094 if (ndev)
Eli Cohenfa417f72010-10-24 21:08:52 -07002095 dev_put(ndev);
Eli Cohenfa417f72010-10-24 21:08:52 -07002096 list_del(&ge->list);
2097 kfree(ge);
2098 } else
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002099 pr_warn("could not find mgid entry\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07002100
2101 mutex_unlock(&mqp->mutex);
2102
2103 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002104}
2105
2106static int init_node_data(struct mlx4_ib_dev *dev)
2107{
2108 struct ib_smp *in_mad = NULL;
2109 struct ib_smp *out_mad = NULL;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002110 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
Roland Dreier225c7b12007-05-08 18:00:38 -07002111 int err = -ENOMEM;
2112
2113 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
2114 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
2115 if (!in_mad || !out_mad)
2116 goto out;
2117
2118 init_query_mad(in_mad);
2119 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002120 if (mlx4_is_master(dev->dev))
2121 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
Roland Dreier225c7b12007-05-08 18:00:38 -07002122
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002123 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002124 if (err)
2125 goto out;
2126
Yuval Shaiabd99fde2016-08-25 10:57:07 -07002127 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
Roland Dreier225c7b12007-05-08 18:00:38 -07002128
2129 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2130
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002131 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002132 if (err)
2133 goto out;
2134
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002135 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
Roland Dreier225c7b12007-05-08 18:00:38 -07002136 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2137
2138out:
2139 kfree(in_mad);
2140 kfree(out_mad);
2141 return err;
2142}
2143
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002144static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2145 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002146{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002147 struct mlx4_ib_dev *dev =
2148 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002149 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002150}
2151
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002152static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2153 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002154{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002155 struct mlx4_ib_dev *dev =
2156 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002157 return sprintf(buf, "%x\n", dev->dev->rev_id);
2158}
2159
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002160static ssize_t show_board(struct device *device, struct device_attribute *attr,
2161 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002162{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002163 struct mlx4_ib_dev *dev =
2164 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2165 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2166 dev->dev->board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002167}
2168
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002169static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002170static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2171static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002172
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002173static struct device_attribute *mlx4_class_attributes[] = {
2174 &dev_attr_hw_rev,
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002175 &dev_attr_hca_type,
2176 &dev_attr_board_id
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002177};
2178
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002179struct diag_counter {
2180 const char *name;
2181 u32 offset;
2182};
2183
2184#define DIAG_COUNTER(_name, _offset) \
2185 { .name = #_name, .offset = _offset }
2186
2187static const struct diag_counter diag_basic[] = {
2188 DIAG_COUNTER(rq_num_lle, 0x00),
2189 DIAG_COUNTER(sq_num_lle, 0x04),
2190 DIAG_COUNTER(rq_num_lqpoe, 0x08),
2191 DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2192 DIAG_COUNTER(rq_num_lpe, 0x18),
2193 DIAG_COUNTER(sq_num_lpe, 0x1C),
2194 DIAG_COUNTER(rq_num_wrfe, 0x20),
2195 DIAG_COUNTER(sq_num_wrfe, 0x24),
2196 DIAG_COUNTER(sq_num_mwbe, 0x2C),
2197 DIAG_COUNTER(sq_num_bre, 0x34),
2198 DIAG_COUNTER(sq_num_rire, 0x44),
2199 DIAG_COUNTER(rq_num_rire, 0x48),
2200 DIAG_COUNTER(sq_num_rae, 0x4C),
2201 DIAG_COUNTER(rq_num_rae, 0x50),
2202 DIAG_COUNTER(sq_num_roe, 0x54),
2203 DIAG_COUNTER(sq_num_tree, 0x5C),
2204 DIAG_COUNTER(sq_num_rree, 0x64),
2205 DIAG_COUNTER(rq_num_rnr, 0x68),
2206 DIAG_COUNTER(sq_num_rnr, 0x6C),
2207 DIAG_COUNTER(rq_num_oos, 0x100),
2208 DIAG_COUNTER(sq_num_oos, 0x104),
2209};
2210
2211static const struct diag_counter diag_ext[] = {
2212 DIAG_COUNTER(rq_num_dup, 0x130),
2213 DIAG_COUNTER(sq_num_to, 0x134),
2214};
2215
2216static const struct diag_counter diag_device_only[] = {
2217 DIAG_COUNTER(num_cqovf, 0x1A0),
2218 DIAG_COUNTER(rq_num_udsdprd, 0x118),
2219};
2220
2221static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2222 u8 port_num)
2223{
2224 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2225 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2226
2227 if (!diag[!!port_num].name)
2228 return NULL;
2229
2230 return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2231 diag[!!port_num].num_counters,
2232 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2233}
2234
2235static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2236 struct rdma_hw_stats *stats,
2237 u8 port, int index)
2238{
2239 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2240 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2241 u32 hw_value[ARRAY_SIZE(diag_device_only) +
2242 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2243 int ret;
2244 int i;
2245
2246 ret = mlx4_query_diag_counters(dev->dev,
2247 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2248 diag[!!port].offset, hw_value,
2249 diag[!!port].num_counters, port);
2250
2251 if (ret)
2252 return ret;
2253
2254 for (i = 0; i < diag[!!port].num_counters; i++)
2255 stats->value[i] = hw_value[i];
2256
2257 return diag[!!port].num_counters;
2258}
2259
2260static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2261 const char ***name,
2262 u32 **offset,
2263 u32 *num,
2264 bool port)
2265{
2266 u32 num_counters;
2267
2268 num_counters = ARRAY_SIZE(diag_basic);
2269
2270 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2271 num_counters += ARRAY_SIZE(diag_ext);
2272
2273 if (!port)
2274 num_counters += ARRAY_SIZE(diag_device_only);
2275
2276 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2277 if (!*name)
2278 return -ENOMEM;
2279
2280 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2281 if (!*offset)
2282 goto err_name;
2283
2284 *num = num_counters;
2285
2286 return 0;
2287
2288err_name:
2289 kfree(*name);
2290 return -ENOMEM;
2291}
2292
2293static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2294 const char **name,
2295 u32 *offset,
2296 bool port)
2297{
2298 int i;
2299 int j;
2300
2301 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2302 name[i] = diag_basic[i].name;
2303 offset[i] = diag_basic[i].offset;
2304 }
2305
2306 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2307 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2308 name[j] = diag_ext[i].name;
2309 offset[j] = diag_ext[i].offset;
2310 }
2311 }
2312
2313 if (!port) {
2314 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2315 name[j] = diag_device_only[i].name;
2316 offset[j] = diag_device_only[i].offset;
2317 }
2318 }
2319}
2320
2321static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2322{
2323 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2324 int i;
2325 int ret;
2326 bool per_port = !!(ibdev->dev->caps.flags2 &
2327 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2328
Kamal Heib69d269d382016-09-12 19:16:22 +03002329 if (mlx4_is_slave(ibdev->dev))
2330 return 0;
2331
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002332 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2333 /* i == 1 means we are building port counters */
2334 if (i && !per_port)
2335 continue;
2336
2337 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2338 &diag[i].offset,
2339 &diag[i].num_counters, i);
2340 if (ret)
2341 goto err_alloc;
2342
2343 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2344 diag[i].offset, i);
2345 }
2346
2347 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
2348 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
2349
2350 return 0;
2351
2352err_alloc:
2353 if (i) {
2354 kfree(diag[i - 1].name);
2355 kfree(diag[i - 1].offset);
2356 }
2357
2358 return ret;
2359}
2360
2361static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2362{
2363 int i;
2364
2365 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2366 kfree(ibdev->diag_counters[i].offset);
2367 kfree(ibdev->diag_counters[i].name);
2368 }
2369}
2370
Matan Barak9433c182014-05-15 15:29:28 +03002371#define MLX4_IB_INVALID_MAC ((u64)-1)
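/*
 * Called when the port's netdev changes (address or link events):
 * cache the new source MAC and, for SR-IOV, re-register it for the
 * port's proxy QP1, releasing whichever MAC is no longer referenced.
 */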
2372static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2373 struct net_device *dev,
2374 int port)
2375{
2376 u64 new_smac = 0;
2377 u64 release_mac = MLX4_IB_INVALID_MAC;
2378 struct mlx4_ib_qp *qp;
2379
2380 read_lock(&dev_base_lock);
2381 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2382 read_unlock(&dev_base_lock);
2383
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002384 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2385
Jack Morgensteind24d9f42014-09-11 14:11:18 +03002386 /* no need to update QP1 or register a MAC in non-SRIOV */
2387 if (!mlx4_is_mfunc(ibdev->dev))
2388 return;
2389
Matan Barak9433c182014-05-15 15:29:28 +03002390 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2391 qp = ibdev->qp1_proxy[port - 1];
2392 if (qp) {
2393 int new_smac_index;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002394 u64 old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002395 struct mlx4_update_qp_params update_params;
2396
Jack Morgenstein25476b02014-09-11 14:11:20 +03002397 mutex_lock(&qp->mutex);
2398 old_smac = qp->pri.smac;
Matan Barak9433c182014-05-15 15:29:28 +03002399 if (new_smac == old_smac)
2400 goto unlock;
2401
2402 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2403
2404 if (new_smac_index < 0)
2405 goto unlock;
2406
2407 update_params.smac_index = new_smac_index;
Matan Barak09e05c32014-09-10 16:41:56 +03002408 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
Matan Barak9433c182014-05-15 15:29:28 +03002409 &update_params)) {
2410 release_mac = new_smac;
2411 goto unlock;
2412 }
Jack Morgenstein25476b02014-09-11 14:11:20 +03002413 /* if old port was zero, no mac was yet registered for this QP */
2414 if (qp->pri.smac_port)
2415 release_mac = old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002416 qp->pri.smac = new_smac;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002417 qp->pri.smac_port = port;
Matan Barak9433c182014-05-15 15:29:28 +03002418 qp->pri.smac_index = new_smac_index;
Matan Barak9433c182014-05-15 15:29:28 +03002419 }
2420
2421unlock:
Matan Barak9433c182014-05-15 15:29:28 +03002422 if (release_mac != MLX4_IB_INVALID_MAC)
2423 mlx4_unregister_mac(ibdev->dev, port, release_mac);
Jack Morgenstein25476b02014-09-11 14:11:20 +03002424 if (qp)
2425 mutex_unlock(&qp->mutex);
2426 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
Matan Barak9433c182014-05-15 15:29:28 +03002427}
2428
Matan Barak9433c182014-05-15 15:29:28 +03002429static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2430 struct net_device *dev,
2431 unsigned long event)
2432
Moni Shouad487ee72013-12-12 18:03:13 +02002433{
2434 struct mlx4_ib_iboe *iboe;
Matan Barak9433c182014-05-15 15:29:28 +03002435 int update_qps_port = -1;
Moni Shouad487ee72013-12-12 18:03:13 +02002436 int port;
2437
Moni Shoua5070cd22015-07-30 18:33:30 +03002438 ASSERT_RTNL();
2439
Moni Shouad487ee72013-12-12 18:03:13 +02002440 iboe = &ibdev->iboe;
2441
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002442 spin_lock_bh(&iboe->lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002443 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
Moni Shouaad4885d22014-02-05 15:13:02 +02002444
Moni Shouad487ee72013-12-12 18:03:13 +02002445 iboe->netdevs[port - 1] =
2446 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
Moni Shouad487ee72013-12-12 18:03:13 +02002447
Matan Barak9433c182014-05-15 15:29:28 +03002448 if (dev == iboe->netdevs[port - 1] &&
2449 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2450 event == NETDEV_UP || event == NETDEV_CHANGE))
2451 update_qps_port = port;
2452
Moni Shouad487ee72013-12-12 18:03:13 +02002453 }
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002454 spin_unlock_bh(&iboe->lock);
Matan Barak9433c182014-05-15 15:29:28 +03002455
2456 if (update_qps_port > 0)
2457 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
Moni Shouad487ee72013-12-12 18:03:13 +02002458}
2459
2460static int mlx4_ib_netdev_event(struct notifier_block *this,
2461 unsigned long event, void *ptr)
2462{
Jiri Pirko351638e2013-05-28 01:30:21 +00002463 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Eli Cohenfa417f72010-10-24 21:08:52 -07002464 struct mlx4_ib_dev *ibdev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002465
2466 if (!net_eq(dev_net(dev), &init_net))
2467 return NOTIFY_DONE;
2468
2469 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
Matan Barak9433c182014-05-15 15:29:28 +03002470 mlx4_ib_scan_netdevs(ibdev, dev, event);
Eli Cohenfa417f72010-10-24 21:08:52 -07002471
2472 return NOTIFY_DONE;
2473}
2474
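/*
 * On the SR-IOV master, initialize each slave's virt2phys pkey mapping
 * (identity for the master itself) and prime the physical pkey cache.
 */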
Jack Morgenstein54679e12012-08-03 08:40:43 +00002475static void init_pkeys(struct mlx4_ib_dev *ibdev)
2476{
2477 int port;
2478 int slave;
2479 int i;
2480
2481 if (mlx4_is_master(ibdev->dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002482 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2483 ++slave) {
Jack Morgenstein54679e12012-08-03 08:40:43 +00002484 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2485 for (i = 0;
2486 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2487 ++i) {
2488 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2489 /* master has the identity virt2phys pkey mapping */
2490 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2491 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2492 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2493 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2494 }
2495 }
2496 }
2497 /* initialize pkey cache */
2498 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2499 for (i = 0;
2500 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2501 ++i)
2502 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2503 (i) ? 0 : 0xFFFF;
2504 }
2505 }
2506}
2507
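/*
 * Assign dedicated completion EQs per port from the device's vector
 * pool and advertise only those successfully obtained through
 * num_comp_vectors.
 */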
Shlomo Pongratze605b742012-04-29 17:04:27 +03002508static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2509{
Matan Barakc66fa192015-05-31 09:30:16 +03002510 int i, j, eq = 0, total_eqs = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002511
Matan Barakc66fa192015-05-31 09:30:16 +03002512 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2513 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002514 if (!ibdev->eq_table)
2515 return;
2516
Matan Barakc66fa192015-05-31 09:30:16 +03002517 for (i = 1; i <= dev->caps.num_ports; i++) {
2518 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2519 j++, total_eqs++) {
2520 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2521 continue;
2522 ibdev->eq_table[eq] = total_eqs;
2523 if (!mlx4_assign_eq(dev, i,
2524 &ibdev->eq_table[eq]))
2525 eq++;
2526 else
2527 ibdev->eq_table[eq] = -1;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002528 }
2529 }
2530
Matan Barakc66fa192015-05-31 09:30:16 +03002531 for (i = eq; i < dev->caps.num_comp_vectors;
2532 ibdev->eq_table[i++] = -1)
2533 ;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002534
2535 /* Advertise the new number of EQs to clients */
Matan Barakc66fa192015-05-31 09:30:16 +03002536 ibdev->ib_dev.num_comp_vectors = eq;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002537}
2538
2539static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2540{
2541 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002542 int total_eqs = ibdev->ib_dev.num_comp_vectors;
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002543
Matan Barakc66fa192015-05-31 09:30:16 +03002544 /* no eqs were allocated */
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002545 if (!ibdev->eq_table)
2546 return;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002547
2548 /* Reset the advertised EQ number */
Matan Barakc66fa192015-05-31 09:30:16 +03002549 ibdev->ib_dev.num_comp_vectors = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002550
Matan Barakc66fa192015-05-31 09:30:16 +03002551 for (i = 0; i < total_eqs; i++)
Shlomo Pongratze605b742012-04-29 17:04:27 +03002552 mlx4_release_eq(dev, ibdev->eq_table[i]);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002553
Shlomo Pongratze605b742012-04-29 17:04:27 +03002554 kfree(ibdev->eq_table);
Matan Barakc66fa192015-05-31 09:30:16 +03002555 ibdev->eq_table = NULL;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002556}
2557
Ira Weiny77386132015-05-13 20:02:58 -04002558static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2559 struct ib_port_immutable *immutable)
2560{
2561 struct ib_port_attr attr;
Matan Barak4ed088e2016-01-14 17:50:43 +02002562 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
Ira Weiny77386132015-05-13 20:02:58 -04002563 int err;
2564
Matan Barak4ed088e2016-01-14 17:50:43 +02002565 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
Ira Weinyf9b22e32015-05-13 20:02:59 -04002566 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002567 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002568 } else {
2569 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2570 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2571 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2572 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2573 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002574 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2575 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2576 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2577 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002578 }
Ira Weinyf9b22e32015-05-13 20:02:59 -04002579
Or Gerlitzc4550c62017-01-24 13:02:39 +02002580 err = ib_query_port(ibdev, port_num, &attr);
2581 if (err)
2582 return err;
2583
2584 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2585 immutable->gid_tbl_len = attr.gid_tbl_len;
2586
Ira Weiny77386132015-05-13 20:02:58 -04002587 return 0;
2588}
2589
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002590static void get_fw_ver_str(struct ib_device *device, char *str)
Ira Weinye9db59f2016-06-15 02:22:00 -04002591{
2592 struct mlx4_ib_dev *dev =
2593 container_of(device, struct mlx4_ib_dev, ib_dev);
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002594 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
Ira Weinye9db59f2016-06-15 02:22:00 -04002595 (int) (dev->dev->caps.fw_ver >> 32),
2596 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2597 (int) dev->dev->caps.fw_ver & 0xffff);
2598}
2599
Roland Dreier225c7b12007-05-08 18:00:38 -07002600static void *mlx4_ib_add(struct mlx4_dev *dev)
2601{
2602 struct mlx4_ib_dev *ibdev;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002603 int num_ports = 0;
Jack Morgenstein035b1032012-05-10 23:28:09 +03002604 int i, j;
Eli Cohenfa417f72010-10-24 21:08:52 -07002605 int err;
2606 struct mlx4_ib_iboe *iboe;
Matan Barak41966702014-02-02 17:06:47 +02002607 int ib_num_ports = 0;
Moni Shouaa5750092015-02-03 16:48:37 +02002608 int num_req_counters;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002609 int allocated;
2610 u32 counter_index;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002611 struct counter_index *new_counter_index = NULL;
Roland Dreier225c7b12007-05-08 18:00:38 -07002612
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002613 pr_info_once("%s", mlx4_ib_version);
Roland Dreier68f39482008-02-04 20:20:44 -08002614
Jack Morgenstein026149c2012-08-03 08:40:55 +00002615 num_ports = 0;
Eli Cohenfa417f72010-10-24 21:08:52 -07002616 mlx4_foreach_ib_transport_port(i, dev)
Roland Dreier22e7ef92009-01-09 13:22:29 -08002617 num_ports++;
2618
2619 /* No point in registering a device with no ports... */
2620 if (num_ports == 0)
2621 return NULL;
2622
Roland Dreier225c7b12007-05-08 18:00:38 -07002623 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2624 if (!ibdev) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002625 dev_err(&dev->persist->pdev->dev,
2626 "Device struct alloc failed\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002627 return NULL;
2628 }
2629
Eli Cohenfa417f72010-10-24 21:08:52 -07002630 iboe = &ibdev->iboe;
2631
Roland Dreier225c7b12007-05-08 18:00:38 -07002632 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2633 goto err_dealloc;
2634
2635 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2636 goto err_pd;
2637
Roland Dreier4979d182011-01-12 09:50:36 -08002638 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2639 PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002640 if (!ibdev->uar_map)
2641 goto err_uar;
Jack Morgenstein26c6bc72007-05-13 17:18:23 +03002642 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002643
Roland Dreier225c7b12007-05-08 18:00:38 -07002644 ibdev->dev = dev;
Moni Shouac6215742015-02-03 16:48:39 +02002645 ibdev->bond_next_port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002646
2647 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2648 ibdev->ib_dev.owner = THIS_MODULE;
2649 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
Roland Dreier95d04f02008-07-23 08:12:26 -07002650 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002651 ibdev->num_ports = num_ports;
Moni Shouaa5750092015-02-03 16:48:37 +02002652 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2653 1 : ibdev->num_ports;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002654 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
Bart Van Assched66c88a82017-01-20 13:04:20 -08002655 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
Moni Shoua5070cd22015-07-30 18:33:30 +03002656 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2657 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2658 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
Roland Dreier225c7b12007-05-08 18:00:38 -07002659
Or Gerlitz08ff3232012-10-21 14:59:24 +00002660 if (dev->caps.userspace_caps)
2661 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2662 else
2663 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2664
Roland Dreier225c7b12007-05-08 18:00:38 -07002665 ibdev->ib_dev.uverbs_cmd_mask =
2666 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2667 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2668 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2669 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2670 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2671 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Matan Barak93769322014-07-31 11:01:30 +03002672 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002673 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2674 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2675 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002676 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002677 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2678 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2679 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002680 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002681 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2682 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2683 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2684 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2685 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002686 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
Sean Hefty18abd5e2011-06-02 10:43:26 -07002687 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
Sean Hefty42849b22011-08-11 13:57:43 -07002688 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2689 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Roland Dreier225c7b12007-05-08 18:00:38 -07002690
2691 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2692 ibdev->ib_dev.query_port = mlx4_ib_query_port;
Eli Cohenfa417f72010-10-24 21:08:52 -07002693 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
Roland Dreier225c7b12007-05-08 18:00:38 -07002694 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2695 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2696 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2697 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2698 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2699 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2700 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2701 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2702 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2703 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2704 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2705 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2706 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2707 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002708 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002709 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2710 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2711 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2712 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002713 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
Roland Dreier225c7b12007-05-08 18:00:38 -07002714 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2715 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2716 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2717 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
Eli Cohen3fdcb972008-04-16 21:09:33 -07002718 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002719 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002720 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2721 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2722 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2723 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2724 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
Matan Barak93769322014-07-31 11:01:30 +03002725 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
Roland Dreier225c7b12007-05-08 18:00:38 -07002726 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
Sagi Grimberg679e34d2015-07-30 10:32:42 +03002727 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
Sagi Grimberg1b2cd0f2015-10-13 19:11:27 +03002728 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
Roland Dreier225c7b12007-05-08 18:00:38 -07002729 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2730 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2731 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
Ira Weiny77386132015-05-13 20:02:58 -04002732 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
Ira Weinye9db59f2016-06-15 02:22:00 -04002733 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
Yishai Hadasae184dd2015-08-13 18:32:06 +03002734 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
Roland Dreier225c7b12007-05-08 18:00:38 -07002735
Guy Levi400b1eb2017-07-04 16:24:24 +03002736 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2737 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2738 IB_LINK_LAYER_ETHERNET) ||
2739 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2740 IB_LINK_LAYER_ETHERNET))) {
2741 ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
2742 ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
2743 ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
Guy Levib8d46ca2017-07-04 16:24:25 +03002744 ibdev->ib_dev.create_rwq_ind_table =
2745 mlx4_ib_create_rwq_ind_table;
2746 ibdev->ib_dev.destroy_rwq_ind_table =
2747 mlx4_ib_destroy_rwq_ind_table;
Guy Levi400b1eb2017-07-04 16:24:24 +03002748 ibdev->ib_dev.uverbs_ex_cmd_mask |=
Guy Levib8d46ca2017-07-04 16:24:25 +03002749 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
2750 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
2751 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
2752 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2753 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
Guy Levi400b1eb2017-07-04 16:24:24 +03002754 }
2755
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002756 if (!mlx4_is_slave(ibdev->dev)) {
2757 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2758 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2759 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2760 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2761 }
Jack Morgenstein8ad11fb2007-08-01 12:29:05 +03002762
Shani Michaelib4253882013-02-06 16:19:16 +00002763 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2764 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2765 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
Shani Michaelib4253882013-02-06 16:19:16 +00002766 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2767
2768 ibdev->ib_dev.uverbs_cmd_mask |=
2769 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2770 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2771 }
2772
Sean Hefty012a8ff2011-06-02 09:01:33 -07002773 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2774 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2775 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2776 ibdev->ib_dev.uverbs_cmd_mask |=
2777 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2778 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2779 }
2780
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002781 if (check_flow_steering_support(dev)) {
Matan Barak0a9b7d52013-11-07 15:25:15 +02002782 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002783 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2784 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2785
Yann Droneaudf21519b2013-11-06 23:21:49 +01002786 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2787 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2788 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002789 }
2790
Matan Barak4b664c42015-06-11 16:35:27 +03002791 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2792 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
Eran Ben Elishafbfb6622015-10-15 14:44:42 +03002793 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2794 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
Matan Barak4b664c42015-06-11 16:35:27 +03002795
Shlomo Pongratze605b742012-04-29 17:04:27 +03002796 mlx4_ib_alloc_eqs(dev, ibdev);
2797
Eli Cohenfa417f72010-10-24 21:08:52 -07002798 spin_lock_init(&iboe->lock);
2799
Roland Dreier225c7b12007-05-08 18:00:38 -07002800 if (init_node_data(ibdev))
2801 goto err_map;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03002802 mlx4_init_sl2vl_tbl(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002803
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002804 for (i = 0; i < ibdev->num_ports; ++i) {
2805 mutex_init(&ibdev->counters_table[i].mutex);
2806 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2807 }
2808
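	/* Set up one flow counter per port (a single one when the ports are
	 * bonded). Ethernet ports try to allocate a dedicated counter and fall
	 * back to the port's default counter on failure; IB ports always use
	 * the default counter. */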
Moni Shouaa5750092015-02-03 16:48:37 +02002809 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2810 for (i = 0; i < num_req_counters; ++i) {
Matan Barak9433c182014-05-15 15:29:28 +03002811 mutex_init(&ibdev->qp1_proxy_lock[i]);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002812 allocated = 0;
Or Gerlitzcfcde112011-06-15 14:49:57 +00002813 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2814 IB_LINK_LAYER_ETHERNET) {
Moshe Shemeshf3301872017-06-21 09:29:36 +03002815 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2816 MLX4_RES_USAGE_DRIVER);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002817			/* if allocating a new counter fails, use the default */
Or Gerlitzcfcde112011-06-15 14:49:57 +00002818 if (err)
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002819 counter_index =
2820 mlx4_get_default_counter_index(dev,
2821 i + 1);
2822 else
2823 allocated = 1;
	2824		} else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2825 counter_index = mlx4_get_default_counter_index(dev,
2826 i + 1);
Dan Carpenter3839d8a2014-03-28 11:21:39 +03002827 }
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002828 new_counter_index = kmalloc(sizeof(*new_counter_index),
2829 GFP_KERNEL);
2830 if (!new_counter_index) {
2831 if (allocated)
2832 mlx4_counter_free(ibdev->dev, counter_index);
2833 goto err_counter;
2834 }
2835 new_counter_index->index = counter_index;
2836 new_counter_index->allocated = allocated;
2837 list_add_tail(&new_counter_index->list,
2838 &ibdev->counters_table[i].counters_list);
2839 ibdev->counters_table[i].default_counter = counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002840 pr_info("counter index %d for port %d allocated %d\n",
2841 counter_index, i + 1, allocated);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002842 }
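	/* In bonded mode only port 1 was handled above; point the remaining
	 * ports' counter tables at the same (port 1) counter index. */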
Moni Shouaa5750092015-02-03 16:48:37 +02002843 if (mlx4_is_bonded(dev))
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002844 for (i = 1; i < ibdev->num_ports ; ++i) {
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002845 new_counter_index =
2846 kmalloc(sizeof(struct counter_index),
2847 GFP_KERNEL);
2848 if (!new_counter_index)
2849 goto err_counter;
2850 new_counter_index->index = counter_index;
2851 new_counter_index->allocated = 0;
2852 list_add_tail(&new_counter_index->list,
2853 &ibdev->counters_table[i].counters_list);
2854 ibdev->counters_table[i].default_counter =
2855 counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002856 }
Or Gerlitzcfcde112011-06-15 14:49:57 +00002857
Matan Barak41966702014-02-02 17:06:47 +02002858 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2859 ib_num_ports++;
2860
Roland Dreier225c7b12007-05-08 18:00:38 -07002861 spin_lock_init(&ibdev->sm_lock);
2862 mutex_init(&ibdev->cap_mask_mutex);
Yishai Hadas35f05da2015-02-08 11:49:34 +02002863 INIT_LIST_HEAD(&ibdev->qp_list);
2864 spin_lock_init(&ibdev->reset_flow_resource_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002865
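	/* With device-managed flow steering and at least one IB port, reserve a
	 * dedicated QPN range for steerable UC QPs and track it with a bitmap.
	 * If the HCA supports DMFS for IPoIB the range is registered with the
	 * device; otherwise the bitmap is filled so no QPN can be handed out. */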
Matan Barak41966702014-02-02 17:06:47 +02002866 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2867 ib_num_ports) {
Matan Barakc1c98502013-11-07 15:25:17 +02002868 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2869 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2870 MLX4_IB_UC_STEER_QPN_ALIGN,
Moshe Shemeshf3301872017-06-21 09:29:36 +03002871 &ibdev->steer_qpn_base, 0,
2872 MLX4_RES_USAGE_DRIVER);
Matan Barakc1c98502013-11-07 15:25:17 +02002873 if (err)
2874 goto err_counter;
2875
2876 ibdev->ib_uc_qpns_bitmap =
2877 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2878 sizeof(long),
2879 GFP_KERNEL);
Leon Romanovsky15d46262016-11-03 16:44:12 +02002880 if (!ibdev->ib_uc_qpns_bitmap)
Matan Barakc1c98502013-11-07 15:25:17 +02002881 goto err_steer_qp_release;
Matan Barakc1c98502013-11-07 15:25:17 +02002882
Eran Ben Elisha1f22e452016-11-10 11:31:00 +02002883 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2884 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2885 ibdev->steer_qpn_count);
2886 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2887 dev, ibdev->steer_qpn_base,
2888 ibdev->steer_qpn_base +
2889 ibdev->steer_qpn_count - 1);
2890 if (err)
2891 goto err_steer_free_bitmap;
2892 } else {
2893 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2894 ibdev->steer_qpn_count);
2895 }
Matan Barakc1c98502013-11-07 15:25:17 +02002896 }
2897
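	/* Record each port's default MAC address in the RoCE (iboe) state. */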
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002898 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2899 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2900
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002901 if (mlx4_ib_alloc_diag_counters(ibdev))
Matan Barakc1c98502013-11-07 15:25:17 +02002902 goto err_steer_free_bitmap;
Roland Dreier225c7b12007-05-08 18:00:38 -07002903
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002904 if (ib_register_device(&ibdev->ib_dev, NULL))
2905 goto err_diag_counters;
2906
Roland Dreier225c7b12007-05-08 18:00:38 -07002907 if (mlx4_ib_mad_init(ibdev))
2908 goto err_reg;
2909
Jack Morgensteinfc065732012-08-03 08:40:42 +00002910 if (mlx4_ib_init_sriov(ibdev))
2911 goto err_mad;
2912
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002913 if (!iboe->nb.notifier_call) {
2914 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2915 err = register_netdevice_notifier(&iboe->nb);
2916 if (err) {
2917 iboe->nb.notifier_call = NULL;
2918 goto err_notif;
Moni Shouad487ee72013-12-12 18:03:13 +02002919 }
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002920 }
2921 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2922 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2923 if (err)
2924 goto err_notif;
Eli Cohenfa417f72010-10-24 21:08:52 -07002925 }
2926
Jack Morgenstein035b1032012-05-10 23:28:09 +03002927 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002928 if (device_create_file(&ibdev->ib_dev.dev,
Jack Morgenstein035b1032012-05-10 23:28:09 +03002929 mlx4_class_attributes[j]))
Eli Cohenfa417f72010-10-24 21:08:52 -07002930 goto err_notif;
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002931 }
2932
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002933 ibdev->ib_active = true;
Jiri Pirko09d4d082016-02-26 17:32:24 +01002934 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2935 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2936 &ibdev->ib_dev);
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002937
Jack Morgenstein54679e12012-08-03 08:40:43 +00002938 if (mlx4_is_mfunc(ibdev->dev))
2939 init_pkeys(ibdev);
2940
Jack Morgenstein3806d082012-08-03 08:40:58 +00002941 /* create paravirt contexts for any VFs which are active */
2942 if (mlx4_is_master(ibdev->dev)) {
2943 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2944 if (j == mlx4_master_func_num(ibdev->dev))
2945 continue;
2946 if (mlx4_is_slave_active(ibdev->dev, j))
2947 do_slave_init(ibdev, j, 1);
2948 }
2949 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002950 return ibdev;
2951
Eli Cohenfa417f72010-10-24 21:08:52 -07002952err_notif:
Moni Shouad487ee72013-12-12 18:03:13 +02002953 if (ibdev->iboe.nb.notifier_call) {
2954 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2955 pr_warn("failure unregistering notifier\n");
2956 ibdev->iboe.nb.notifier_call = NULL;
2957 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002958 flush_workqueue(wq);
2959
Jack Morgensteinfc065732012-08-03 08:40:42 +00002960 mlx4_ib_close_sriov(ibdev);
2961
2962err_mad:
2963 mlx4_ib_mad_cleanup(ibdev);
2964
Roland Dreier225c7b12007-05-08 18:00:38 -07002965err_reg:
2966 ib_unregister_device(&ibdev->ib_dev);
2967
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002968err_diag_counters:
2969 mlx4_ib_diag_cleanup(ibdev);
2970
Matan Barakc1c98502013-11-07 15:25:17 +02002971err_steer_free_bitmap:
2972 kfree(ibdev->ib_uc_qpns_bitmap);
2973
2974err_steer_qp_release:
2975 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2976 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2977 ibdev->steer_qpn_count);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002978err_counter:
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002979 for (i = 0; i < ibdev->num_ports; ++i)
2980 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2981
Roland Dreier225c7b12007-05-08 18:00:38 -07002982err_map:
Jack Morgenstein99e68902017-03-21 12:57:05 +02002983 mlx4_ib_free_eqs(dev, ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002984 iounmap(ibdev->uar_map);
2985
2986err_uar:
2987 mlx4_uar_free(dev, &ibdev->priv_uar);
2988
2989err_pd:
2990 mlx4_pd_free(dev, ibdev->priv_pdn);
2991
2992err_dealloc:
2993 ib_dealloc_device(&ibdev->ib_dev);
2994
2995 return NULL;
2996}
2997
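/* Carve a power-of-two aligned block of 'count' QPNs out of the reserved
 * flow-steering range; returns 0 and the first QPN on success, or the
 * negative value returned by the bitmap allocator on failure. */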
Matan Barakc1c98502013-11-07 15:25:17 +02002998int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2999{
3000 int offset;
3001
3002 WARN_ON(!dev->ib_uc_qpns_bitmap);
3003
3004 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
3005 dev->steer_qpn_count,
3006 get_count_order(count));
3007 if (offset < 0)
3008 return offset;
3009
3010 *qpn = dev->steer_qpn_base + offset;
3011 return 0;
3012}
3013
3014void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
3015{
3016 if (!qpn ||
3017 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
3018 return;
3019
3020 BUG_ON(qpn < dev->steer_qpn_base);
3021
3022 bitmap_release_region(dev->ib_uc_qpns_bitmap,
3023 qpn - dev->steer_qpn_base,
3024 get_count_order(count));
3025}
3026
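/* Attach (is_attach != 0) or detach a catch-all IB L2 steering rule for the
 * given UC QP under device-managed flow steering, keeping the resulting
 * registration id in mqp->reg_id. */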
3027int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
3028 int is_attach)
3029{
3030 int err;
3031 size_t flow_size;
3032 struct ib_flow_attr *flow = NULL;
3033 struct ib_flow_spec_ib *ib_spec;
3034
3035 if (is_attach) {
3036 flow_size = sizeof(struct ib_flow_attr) +
3037 sizeof(struct ib_flow_spec_ib);
3038 flow = kzalloc(flow_size, GFP_KERNEL);
3039 if (!flow)
3040 return -ENOMEM;
3041 flow->port = mqp->port;
3042 flow->num_of_specs = 1;
3043 flow->size = flow_size;
3044 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
3045 ib_spec->type = IB_FLOW_SPEC_IB;
3046 ib_spec->size = sizeof(struct ib_flow_spec_ib);
3047 /* Add an empty rule for IB L2 */
3048 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
3049
3050 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
3051 IB_FLOW_DOMAIN_NIC,
3052 MLX4_FS_REGULAR,
3053 &mqp->reg_id);
3054 } else {
3055 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
3056 }
3057 kfree(flow);
3058 return err;
3059}
3060
Roland Dreier225c7b12007-05-08 18:00:38 -07003061static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3062{
3063 struct mlx4_ib_dev *ibdev = ibdev_ptr;
3064 int p;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003065 int i;
Roland Dreier225c7b12007-05-08 18:00:38 -07003066
Jiri Pirko09d4d082016-02-26 17:32:24 +01003067 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3068 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
Moni Shoua4bf97152014-08-21 14:28:42 +03003069 ibdev->ib_active = false;
3070 flush_workqueue(wq);
3071
Jack Morgensteinfc065732012-08-03 08:40:42 +00003072 mlx4_ib_close_sriov(ibdev);
Yevgeny Petrilina6a47772009-03-18 19:49:54 -07003073 mlx4_ib_mad_cleanup(ibdev);
3074 ib_unregister_device(&ibdev->ib_dev);
Mark Bloch3f85f2a2016-07-19 20:54:58 +03003075 mlx4_ib_diag_cleanup(ibdev);
Eli Cohenfa417f72010-10-24 21:08:52 -07003076 if (ibdev->iboe.nb.notifier_call) {
3077 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03003078 pr_warn("failure unregistering notifier\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07003079 ibdev->iboe.nb.notifier_call = NULL;
3080 }
Matan Barakc1c98502013-11-07 15:25:17 +02003081
3082 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3083 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3084 ibdev->steer_qpn_count);
3085 kfree(ibdev->ib_uc_qpns_bitmap);
3086 }
3087
Eli Cohenfa417f72010-10-24 21:08:52 -07003088 iounmap(ibdev->uar_map);
Or Gerlitzcfcde112011-06-15 14:49:57 +00003089 for (p = 0; p < ibdev->num_ports; ++p)
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03003090 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3091
Eli Cohenfa417f72010-10-24 21:08:52 -07003092 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
Roland Dreier225c7b12007-05-08 18:00:38 -07003093 mlx4_CLOSE_PORT(dev, p);
3094
Shlomo Pongratze605b742012-04-29 17:04:27 +03003095 mlx4_ib_free_eqs(dev, ibdev);
3096
Roland Dreier225c7b12007-05-08 18:00:38 -07003097 mlx4_uar_free(dev, &ibdev->priv_uar);
3098 mlx4_pd_free(dev, ibdev->priv_pdn);
3099 ib_dealloc_device(&ibdev->ib_dev);
3100}
3101
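/* Queue per-port work to create (do_init != 0) or tear down the paravirt
 * tunnel QPs of a slave; this is a no-op unless we are the SR-IOV master. */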
Jack Morgensteinfc065732012-08-03 08:40:42 +00003102static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3103{
3104 struct mlx4_ib_demux_work **dm = NULL;
3105 struct mlx4_dev *dev = ibdev->dev;
3106 int i;
3107 unsigned long flags;
Matan Barak449fc482014-03-19 18:11:52 +02003108 struct mlx4_active_ports actv_ports;
3109 unsigned int ports;
3110 unsigned int first_port;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003111
3112 if (!mlx4_is_master(dev))
3113 return;
3114
Matan Barak449fc482014-03-19 18:11:52 +02003115 actv_ports = mlx4_get_active_ports(dev, slave);
3116 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3117 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3118
3119 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003120 if (!dm)
Maninder Singha39a98f2015-07-08 09:43:35 +05303121 return;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003122
Matan Barak449fc482014-03-19 18:11:52 +02003123 for (i = 0; i < ports; i++) {
Jack Morgensteinfc065732012-08-03 08:40:42 +00003124 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
3125 if (!dm[i]) {
Maninder Singha39a98f2015-07-08 09:43:35 +05303126 while (--i >= 0)
3127 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003128 goto out;
3129 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003130 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
Matan Barak449fc482014-03-19 18:11:52 +02003131 dm[i]->port = first_port + i + 1;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003132 dm[i]->slave = slave;
3133 dm[i]->do_init = do_init;
3134 dm[i]->dev = ibdev;
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003135 }
3136 /* initialize or tear down tunnel QPs for the slave */
3137 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3138 if (!ibdev->sriov.is_going_down) {
3139 for (i = 0; i < ports; i++)
Jack Morgensteinfc065732012-08-03 08:40:42 +00003140 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3141 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003142 } else {
3143 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3144 for (i = 0; i < ports; i++)
3145 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003146 }
3147out:
Syam Sidhardhanc89d1272013-02-24 23:20:05 +00003148 kfree(dm);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003149 return;
3150}
3151
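/* On a catastrophic device error, walk every tracked QP and wake the
 * completion handlers of CQs that still have work outstanding, so their
 * consumers notice the reset flow. */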
Yishai Hadas35f05da2015-02-08 11:49:34 +02003152static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3153{
3154 struct mlx4_ib_qp *mqp;
3155 unsigned long flags_qp;
3156 unsigned long flags_cq;
3157 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3158 struct list_head cq_notify_list;
3159 struct mlx4_cq *mcq;
3160 unsigned long flags;
3161
	3162	pr_warn("mlx4_ib_handle_catas_error started\n");
3163 INIT_LIST_HEAD(&cq_notify_list);
3164
	3165	/* Go over the qp list residing on this ibdev, syncing with qp create/destroy. */
3166 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3167
3168 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3169 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3170 if (mqp->sq.tail != mqp->sq.head) {
3171 send_mcq = to_mcq(mqp->ibqp.send_cq);
3172 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3173 if (send_mcq->mcq.comp &&
3174 mqp->ibqp.send_cq->comp_handler) {
3175 if (!send_mcq->mcq.reset_notify_added) {
3176 send_mcq->mcq.reset_notify_added = 1;
3177 list_add_tail(&send_mcq->mcq.reset_notify,
3178 &cq_notify_list);
3179 }
3180 }
3181 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3182 }
3183 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3184 /* Now, handle the QP's receive queue */
3185 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3186 /* no handling is needed for SRQ */
3187 if (!mqp->ibqp.srq) {
3188 if (mqp->rq.tail != mqp->rq.head) {
3189 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3190 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3191 if (recv_mcq->mcq.comp &&
3192 mqp->ibqp.recv_cq->comp_handler) {
3193 if (!recv_mcq->mcq.reset_notify_added) {
3194 recv_mcq->mcq.reset_notify_added = 1;
3195 list_add_tail(&recv_mcq->mcq.reset_notify,
3196 &cq_notify_list);
3197 }
3198 }
3199 spin_unlock_irqrestore(&recv_mcq->lock,
3200 flags_cq);
3201 }
3202 }
3203 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3204 }
3205
3206 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3207 mcq->comp(mcq);
3208 }
3209 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3210 pr_warn("mlx4_ib_handle_catas_error ended\n");
3211}
3212
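/* Recompute the aggregate state of the bonded port: it is ACTIVE if any
 * member netdev is running with carrier, and a single event (PORT_ACTIVE or
 * PORT_ERR) is dispatched for port 1 accordingly. */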
Moni Shouaa5750092015-02-03 16:48:37 +02003213static void handle_bonded_port_state_event(struct work_struct *work)
3214{
3215 struct ib_event_work *ew =
3216 container_of(work, struct ib_event_work, work);
3217 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3218 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3219 int i;
3220 struct ib_event ibev;
3221
3222 kfree(ew);
3223 spin_lock_bh(&ibdev->iboe.lock);
3224 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3225 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
Moni Shoua217e8b12015-03-18 16:51:35 +02003226 enum ib_port_state curr_port_state;
Moni Shouaa5750092015-02-03 16:48:37 +02003227
Moni Shoua217e8b12015-03-18 16:51:35 +02003228 if (!curr_netdev)
3229 continue;
3230
3231 curr_port_state =
Moni Shouaa5750092015-02-03 16:48:37 +02003232 (netif_running(curr_netdev) &&
3233 netif_carrier_ok(curr_netdev)) ?
3234 IB_PORT_ACTIVE : IB_PORT_DOWN;
3235
3236 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3237 curr_port_state : IB_PORT_ACTIVE;
3238 }
3239 spin_unlock_bh(&ibdev->iboe.lock);
3240
3241 ibev.device = &ibdev->ib_dev;
3242 ibev.element.port_num = 1;
3243 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3244 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3245
3246 ib_dispatch_event(&ibev);
3247}
3248
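/* Refresh the cached SL-to-VL mapping for a port from the current device
 * setting; if the query fails, the cache is reset to all zeroes. */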
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003249void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3250{
3251 u64 sl2vl;
3252 int err;
3253
3254 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3255 if (err) {
3256 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3257 port, err);
3258 sl2vl = 0;
3259 }
3260 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3261}
3262
3263static void ib_sl2vl_update_work(struct work_struct *work)
3264{
3265 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3266 struct mlx4_ib_dev *mdev = ew->ib_dev;
3267 int port = ew->port;
3268
3269 mlx4_ib_sl2vl_update(mdev, port);
3270
3271 kfree(ew);
3272}
3273
3274void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3275 int port)
3276{
3277 struct ib_event_work *ew;
3278
3279 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3280 if (ew) {
3281 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3282 ew->port = port;
3283 ew->ib_dev = ibdev;
3284 queue_work(wq, &ew->work);
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003285 }
3286}
3287
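/* Translate mlx4 core events (port up/down, catastrophic error, port
 * management change, slave init/shutdown) into IB events or deferred work
 * for this IB device. */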
Roland Dreier225c7b12007-05-08 18:00:38 -07003288static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003289 enum mlx4_dev_event event, unsigned long param)
Roland Dreier225c7b12007-05-08 18:00:38 -07003290{
3291 struct ib_event ibev;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003292 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003293 struct mlx4_eqe *eqe = NULL;
3294 struct ib_event_work *ew;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003295 int p = 0;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003296
Moni Shouaa5750092015-02-03 16:48:37 +02003297 if (mlx4_is_bonded(dev) &&
3298 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3299 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3300 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3301 if (!ew)
3302 return;
3303 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3304 ew->ib_dev = ibdev;
3305 queue_work(wq, &ew->work);
3306 return;
3307 }
3308
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003309 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3310 eqe = (struct mlx4_eqe *)param;
3311 else
Jack Morgensteinfc065732012-08-03 08:40:42 +00003312 p = (int) param;
Roland Dreier225c7b12007-05-08 18:00:38 -07003313
3314 switch (event) {
Roland Dreier37608ee2008-04-16 21:01:08 -07003315 case MLX4_DEV_EVENT_PORT_UP:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003316 if (p > ibdev->num_ports)
3317 return;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003318 if (!mlx4_is_slave(dev) &&
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003319 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3320 IB_LINK_LAYER_INFINIBAND) {
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003321 if (mlx4_is_master(dev))
3322 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3323 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3324 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3325 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003326 }
Roland Dreier37608ee2008-04-16 21:01:08 -07003327 ibev.event = IB_EVENT_PORT_ACTIVE;
Roland Dreier225c7b12007-05-08 18:00:38 -07003328 break;
3329
Roland Dreier37608ee2008-04-16 21:01:08 -07003330 case MLX4_DEV_EVENT_PORT_DOWN:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003331 if (p > ibdev->num_ports)
3332 return;
Roland Dreier37608ee2008-04-16 21:01:08 -07003333 ibev.event = IB_EVENT_PORT_ERR;
3334 break;
3335
3336 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07003337 ibdev->ib_active = false;
Roland Dreier225c7b12007-05-08 18:00:38 -07003338 ibev.event = IB_EVENT_DEVICE_FATAL;
Yishai Hadas35f05da2015-02-08 11:49:34 +02003339 mlx4_ib_handle_catas_error(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003340 break;
3341
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003342 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3343 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003344 if (!ew)
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003345 break;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003346
3347 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3348 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3349 ew->ib_dev = ibdev;
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00003350 /* need to queue only for port owner, which uses GEN_EQE */
3351 if (mlx4_is_master(dev))
3352 queue_work(wq, &ew->work);
3353 else
3354 handle_port_mgmt_change_event(&ew->work);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003355 return;
3356
Jack Morgensteinfc065732012-08-03 08:40:42 +00003357 case MLX4_DEV_EVENT_SLAVE_INIT:
3358 /* here, p is the slave id */
3359 do_slave_init(ibdev, p, 1);
Yishai Hadasee59fa02015-03-03 17:28:49 +02003360 if (mlx4_is_master(dev)) {
3361 int i;
3362
3363 for (i = 1; i <= ibdev->num_ports; i++) {
3364 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3365 == IB_LINK_LAYER_INFINIBAND)
3366 mlx4_ib_slave_alias_guid_event(ibdev,
3367 p, i,
3368 1);
3369 }
3370 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003371 return;
3372
3373 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
Yishai Hadasee59fa02015-03-03 17:28:49 +02003374 if (mlx4_is_master(dev)) {
3375 int i;
3376
3377 for (i = 1; i <= ibdev->num_ports; i++) {
3378 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3379 == IB_LINK_LAYER_INFINIBAND)
3380 mlx4_ib_slave_alias_guid_event(ibdev,
3381 p, i,
3382 0);
3383 }
3384 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003385 /* here, p is the slave id */
3386 do_slave_init(ibdev, p, 0);
3387 return;
3388
Roland Dreier225c7b12007-05-08 18:00:38 -07003389 default:
3390 return;
3391 }
3392
3393 ibev.device = ibdev_ptr;
Moni Shouaa5750092015-02-03 16:48:37 +02003394 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
Roland Dreier225c7b12007-05-08 18:00:38 -07003395
3396 ib_dispatch_event(&ibev);
3397}
3398
3399static struct mlx4_interface mlx4_ib_interface = {
Eli Cohenfa417f72010-10-24 21:08:52 -07003400 .add = mlx4_ib_add,
3401 .remove = mlx4_ib_remove,
3402 .event = mlx4_ib_event,
Moni Shouaa5750092015-02-03 16:48:37 +02003403 .protocol = MLX4_PROT_IB_IPV6,
3404 .flags = MLX4_INTFF_BONDING
Roland Dreier225c7b12007-05-08 18:00:38 -07003405};
3406
3407static int __init mlx4_ib_init(void)
3408{
Eli Cohenfa417f72010-10-24 21:08:52 -07003409 int err;
3410
Bhaktipriya Shridhar41cd3942016-08-15 23:42:48 +05303411 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
Eli Cohenfa417f72010-10-24 21:08:52 -07003412 if (!wq)
3413 return -ENOMEM;
3414
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003415 err = mlx4_ib_mcg_init();
3416 if (err)
3417 goto clean_wq;
3418
Eli Cohenfa417f72010-10-24 21:08:52 -07003419 err = mlx4_register_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003420 if (err)
3421 goto clean_mcg;
Eli Cohenfa417f72010-10-24 21:08:52 -07003422
3423 return 0;
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003424
3425clean_mcg:
3426 mlx4_ib_mcg_destroy();
3427
3428clean_wq:
3429 destroy_workqueue(wq);
3430 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07003431}
3432
3433static void __exit mlx4_ib_cleanup(void)
3434{
3435 mlx4_unregister_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003436 mlx4_ib_mcg_destroy();
Eli Cohenfa417f72010-10-24 21:08:52 -07003437 destroy_workqueue(wq);
Roland Dreier225c7b12007-05-08 18:00:38 -07003438}
3439
3440module_init(mlx4_ib_init);
3441module_exit(mlx4_ib_cleanup);