/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

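/*
 * Device-managed flow steering (DMFS) is usable only when the firmware
 * advertises it for every link type present: FS_EN for Ethernet ports,
 * DMFS_IPOIB for IB ports, and never for IB ports of a multifunction
 * device.
 */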
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

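/*
 * Return the netdev attached to @port_num, holding a reference on it.
 * When the two ports are bonded, prefer the bond's active slave so the
 * caller sees the device that currently carries traffic.
 */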
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

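/*
 * Write the whole per-port GID table to firmware via SET_PORT. Under
 * bonding the same table is mirrored to port 2 as well.
 */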
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

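/*
 * RoCE GID table add callback. Under iboe->lock, either take another
 * reference on a matching entry or claim the first free slot; the
 * firmware table is rewritten outside the lock from a snapshot taken
 * while it was held.
 */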
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid))) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

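/*
 * RoCE GID table del callback: drop the cached entry's refcount and,
 * once it reaches zero, clear the slot and push the updated table to
 * firmware.
 */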
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

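/*
 * Translate a cached GID index into the index the hardware table really
 * uses. For IB ports (no RoCE GID table) the mapping is the identity;
 * for bonded devices only port 1's table is consulted.
 */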
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid);
	if (ret)
		return ret;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid))) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

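/*
 * ib_query_device hook: attributes are gathered from a NODE_INFO MAD
 * plus the cached firmware capabilities. When userspace issues the
 * extended uverbs command, the HCA core-clock offset is appended to
 * the response as well.
 */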
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
		if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
			props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
	}

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_sge_rd = props->max_sge;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

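/*
 * Query port attributes for an IB link via PORT_INFO MADs. Extended
 * link speeds (FDR/EDR) and FDR-10 need extra probing beyond the base
 * MAD, and a down link is reported as SDR to avoid a stale firmware
 * speed value.
 */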
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

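/*
 * Query port attributes for an Ethernet (RoCE) link. Most values are
 * fixed or taken from QUERY_PORT; state and active MTU come from the
 * attached netdev (the bond master when the ports are bonded).
 */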
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

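/*
 * Program the IB port capability mask and optionally reset the QKEY
 * violation counter; the mailbox layout differs on devices using the
 * old SET_PORT command format.
 */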
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of
	 * the vma, as this VMA is strongly hardware related. Therefore we
	 * set the vm_ops of the newly created/cloned VMA to NULL, to
	 * prevent it from calling us again and trying to do incorrect
	 * actions. We assume that the original vma size is exactly a
	 * single page, so there will be no "splitting" operations on it.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before
	 * the file itself is closed, so no sync is needed with the regular
	 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, a sync is
	 * needed with accessing the vma as part of
	 * mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except
	 * when the process is exiting. The exiting case is handled
	 * explicitly as part of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's
	 * private data to protect against a race condition in
	 * mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that task is dead before returning, it may
			 * prevent a rare case of module down in parallel to a
			 * call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			msleep(1);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_read(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

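/*
 * Map the doorbell page (offset 0), the blue-flame page (offset 1) or
 * the HCA core clock page (offset 3) into userspace. Each BAR may be
 * mapped only once per context so disassociate can find and zap the
 * VMAs later.
 */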
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

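/*
 * Track a multicast GID on the QP's gid_list; if a netdev is currently
 * attached to the QP's port, the entry is marked as added for later
 * cleanup.
 */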
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
			MLX4_NET_TRANS_RULE_ID_TCP :
			MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];

		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}

static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;

		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put an empty rule; the qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}

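/*
 * Build a firmware flow-steering rule from an ib_flow_attr: a control
 * segment (domain, priority, port, QPN) followed by any matching
 * default rules and the translated user specs, attached with
 * MLX4_QP_FLOW_STEERING_ATTACH.
 */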
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
1526
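/*
 * Detach a rule previously attached above, identified by the
 * registration id that the ATTACH command returned.
 */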
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
        int err;
        err = mlx4_cmd(dev, reg_id, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_WRAPPED);
        if (err)
                pr_err("Failed to detach network rule. Registration id = 0x%llx\n",
                       reg_id);
        return err;
}

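/*
 * When VXLAN tunnel offload is active (and DMFS is not in A0 static
 * mode), a NORMAL rule that consists of exactly one ETH spec is also
 * entered into the tunnel steering table, keyed by the destination MAC.
 * In every other case this is a no-op.
 */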
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                                    u64 *reg_id)
{
        void *ib_flow;
        union ib_flow_spec *ib_spec;
        struct mlx4_dev *dev = to_mdev(qp->device)->dev;
        int err = 0;

        if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
            dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
                return 0; /* do nothing */

        ib_flow = flow_attr + 1;
        ib_spec = (union ib_flow_spec *)ib_flow;

        if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
                return 0; /* do nothing */

        err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
                                    flow_attr->port, qp->qp_num,
                                    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
                                    reg_id);
        return err;
}

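/*
 * Verbs entry point for flow creation. The attribute type is mapped to
 * one or two promiscuous-mode rule types (a sniffer needs both a UC and
 * an MC rule), and under bonding each rule is mirrored on port #2 since
 * the application only ever sees a single port.
 */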
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                           struct ib_flow_attr *flow_attr,
                                           int domain)
{
        int err = 0, i = 0, j = 0;
        struct mlx4_ib_flow *mflow;
        enum mlx4_net_trans_promisc_mode type[2];
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);

        memset(type, 0, sizeof(type));

        mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
        if (!mflow) {
                err = -ENOMEM;
                goto err_free;
        }

        switch (flow_attr->type) {
        case IB_FLOW_ATTR_NORMAL:
                type[0] = MLX4_FS_REGULAR;
                break;

        case IB_FLOW_ATTR_ALL_DEFAULT:
                type[0] = MLX4_FS_ALL_DEFAULT;
                break;

        case IB_FLOW_ATTR_MC_DEFAULT:
                type[0] = MLX4_FS_MC_DEFAULT;
                break;

        case IB_FLOW_ATTR_SNIFFER:
                type[0] = MLX4_FS_UC_SNIFFER;
                type[1] = MLX4_FS_MC_SNIFFER;
                break;

        default:
                err = -EINVAL;
                goto err_free;
        }

        while (i < ARRAY_SIZE(type) && type[i]) {
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
                                            &mflow->reg_id[i].id);
                if (err)
                        goto err_create_flow;
                if (is_bonded) {
                        /* Application always sees one port so the mirror rule
                         * must be on port #2
                         */
                        flow_attr->port = 2;
                        err = __mlx4_ib_create_flow(qp, flow_attr,
                                                    domain, type[j],
                                                    &mflow->reg_id[j].mirror);
                        flow_attr->port = 1;
                        if (err)
                                goto err_create_flow;
                        j++;
                }

                i++;
        }

        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
                                               &mflow->reg_id[i].id);
                if (err)
                        goto err_create_flow;

                if (is_bonded) {
                        flow_attr->port = 2;
                        err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
                                                       &mflow->reg_id[j].mirror);
                        flow_attr->port = 1;
                        if (err)
                                goto err_create_flow;
                        j++;
                }
                i++;
        }

        return &mflow->ibflow;

err_create_flow:
        while (i) {
                (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
                                             mflow->reg_id[i].id);
                i--;
        }

        while (j) {
                (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
                                             mflow->reg_id[j].mirror);
                j--;
        }
err_free:
        kfree(mflow);
        return ERR_PTR(err);
}

static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
        int err, ret = 0;
        int i = 0;
        struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
        struct mlx4_ib_flow *mflow = to_mflow(flow_id);

        while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
                err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
                if (err)
                        ret = err;
                if (mflow->reg_id[i].mirror) {
                        err = __mlx4_ib_destroy_flow(mdev->dev,
                                                     mflow->reg_id[i].mirror);
                        if (err)
                                ret = err;
                }
                i++;
        }

        kfree(mflow);
        return ret;
}

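/*
 * Attach a QP to a multicast group. In device-managed steering mode the
 * registration id returned by the attach must be kept so that a later
 * detach can find it; it is stored in a mlx4_ib_steering entry on the
 * QP. Under bonding the group is attached on the other port as well.
 */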
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_steering *ib_steering = NULL;
        enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
        struct mlx4_flow_reg_id reg_id;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
                if (!ib_steering)
                        return -ENOMEM;
        }

        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id.id);
        if (err) {
                pr_err("multicast attach op failed, err %d\n", err);
                goto err_malloc;
        }

        reg_id.mirror = 0;
        if (mlx4_is_bonded(dev)) {
                err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
                                            (mqp->port == 1) ? 2 : 1,
                                            !!(mqp->flags &
                                               MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                            prot, &reg_id.mirror);
                if (err)
                        goto err_add;
        }

        err = add_gid_entry(ibqp, gid);
        if (err)
                goto err_add;

        if (ib_steering) {
                memcpy(ib_steering->gid.raw, gid->raw, 16);
                ib_steering->reg_id = reg_id;
                mutex_lock(&mqp->mutex);
                list_add(&ib_steering->list, &mqp->steering_rules);
                mutex_unlock(&mqp->mutex);
        }
        return 0;

err_add:
        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                              prot, reg_id.id);
        if (reg_id.mirror)
                mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                                      prot, reg_id.mirror);
err_malloc:
        kfree(ib_steering);

        return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_ib_gid_entry *tmp;
        struct mlx4_ib_gid_entry *ret = NULL;

        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
                if (!memcmp(raw, ge->gid.raw, 16)) {
                        ret = ge;
                        break;
                }
        }

        return ret;
}

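/*
 * Detach a QP from a multicast group: recover the registration id saved
 * at attach time (device-managed mode), detach the rule(s), and drop
 * the matching entry from the QP's gid list.
 */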
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_flow_reg_id reg_id = {0, 0};
        enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                struct mlx4_ib_steering *ib_steering;

                mutex_lock(&mqp->mutex);
                list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
                        if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
                                list_del(&ib_steering->list);
                                break;
                        }
                }
                mutex_unlock(&mqp->mutex);
                if (&ib_steering->list == &mqp->steering_rules) {
                        pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
                        return -EINVAL;
                }
                reg_id = ib_steering->reg_id;
                kfree(ib_steering);
        }

        err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                                    prot, reg_id.id);
        if (err)
                return err;

        if (mlx4_is_bonded(dev)) {
                err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                                            prot, reg_id.mirror);
                if (err)
                        return err;
        }

        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
                spin_lock_bh(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
                spin_unlock_bh(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
                kfree(ge);
        } else
                pr_warn("could not find mgid entry\n");

        mutex_unlock(&mqp->mutex);

        return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
        if (mlx4_is_master(dev->dev))
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
                       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
                       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
                       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
};

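/*
 * React to a MAC change on the paired netdev: record the new source MAC
 * for the port and, under SRIOV, register it and move the proxy QP1
 * over to the new smac index before releasing the old MAC.
 */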
#define MLX4_IB_INVALID_MAC ((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
                               struct net_device *dev,
                               int port)
{
        u64 new_smac = 0;
        u64 release_mac = MLX4_IB_INVALID_MAC;
        struct mlx4_ib_qp *qp;

        read_lock(&dev_base_lock);
        new_smac = mlx4_mac_to_u64(dev->dev_addr);
        read_unlock(&dev_base_lock);

        atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

        /* no need to update QP1 or register a mac in non-SRIOV */
        if (!mlx4_is_mfunc(ibdev->dev))
                return;

        mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
        qp = ibdev->qp1_proxy[port - 1];
        if (qp) {
                int new_smac_index;
                u64 old_smac;
                struct mlx4_update_qp_params update_params;

                mutex_lock(&qp->mutex);
                old_smac = qp->pri.smac;
                if (new_smac == old_smac)
                        goto unlock;

                new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

                if (new_smac_index < 0)
                        goto unlock;

                update_params.smac_index = new_smac_index;
                if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
                                   &update_params)) {
                        release_mac = new_smac;
                        goto unlock;
                }
                /* if old port was zero, no mac was yet registered for this QP */
                if (qp->pri.smac_port)
                        release_mac = old_smac;
                qp->pri.smac = new_smac;
                qp->pri.smac_port = port;
                qp->pri.smac_index = new_smac_index;
        }

unlock:
        if (release_mac != MLX4_IB_INVALID_MAC)
                mlx4_unregister_mac(ibdev->dev, port, release_mac);
        if (qp)
                mutex_unlock(&qp->mutex);
        mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}

static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                                 struct net_device *dev,
                                 unsigned long event)
{
        struct mlx4_ib_iboe *iboe;
        int update_qps_port = -1;
        int port;

        ASSERT_RTNL();

        iboe = &ibdev->iboe;

        spin_lock_bh(&iboe->lock);
        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
                iboe->netdevs[port - 1] =
                        mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);

                if (dev == iboe->netdevs[port - 1] &&
                    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
                     event == NETDEV_UP || event == NETDEV_CHANGE))
                        update_qps_port = port;
        }
        spin_unlock_bh(&iboe->lock);

        if (update_qps_port > 0)
                mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}

static int mlx4_ib_netdev_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mlx4_ib_dev *ibdev;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
        mlx4_ib_scan_netdevs(ibdev, dev, event);

        return NOTIFY_DONE;
}

static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
        int port;
        int slave;
        int i;

        if (mlx4_is_master(ibdev->dev)) {
                for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
                     ++slave) {
                        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
                                for (i = 0;
                                     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
                                     ++i) {
                                        ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
                                        /* master has the identity virt2phys pkey mapping */
                                                (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
                                                        ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
                                        mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
                                                             ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
                                }
                        }
                }
                /* initialize pkey cache */
                for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
                        for (i = 0;
                             i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
                             ++i)
                                ibdev->pkeys.phys_pkey_cache[port-1][i] =
                                        (i) ? 0 : 0xFFFF;
                }
        }
}

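/*
 * Ask the core driver for one EQ per port for every EQ it exposes,
 * skipping EQs that are shared between ports so they are counted only
 * once, and advertise just the vectors that were actually assigned.
 */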
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
        int i, j, eq = 0, total_eqs = 0;

        ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
                                  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
        if (!ibdev->eq_table)
                return;

        for (i = 1; i <= dev->caps.num_ports; i++) {
                for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
                     j++, total_eqs++) {
                        if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
                                continue;
                        ibdev->eq_table[eq] = total_eqs;
                        if (!mlx4_assign_eq(dev, i,
                                            &ibdev->eq_table[eq]))
                                eq++;
                        else
                                ibdev->eq_table[eq] = -1;
                }
        }

        for (i = eq; i < dev->caps.num_comp_vectors;
             ibdev->eq_table[i++] = -1)
                ;

        /* Advertise the new number of EQs to clients */
        ibdev->ib_dev.num_comp_vectors = eq;
}

static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
        int i;
        int total_eqs = ibdev->ib_dev.num_comp_vectors;

        /* no eqs were allocated */
        if (!ibdev->eq_table)
                return;

        /* Reset the advertised EQ number */
        ibdev->ib_dev.num_comp_vectors = 0;

        for (i = 0; i < total_eqs; i++)
                mlx4_release_eq(dev, ibdev->eq_table[i]);

        kfree(ibdev->eq_table);
        ibdev->eq_table = NULL;
}

static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = mlx4_ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
                immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
        else
                immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;

        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

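/*
 * Probe path for a new ConnectX function: allocate the ib_device, wire
 * up the verbs entry points and the optional capabilities (FMR, memory
 * windows, XRC, device-managed flow steering), set up per-port flow
 * counters, reserve the steerable UC QP range, and only then register
 * with the IB core and hook up netdev and SRIOV event handling. The
 * error labels unwind in reverse order of setup.
 */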
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
        struct mlx4_ib_dev *ibdev;
        int num_ports = 0;
        int i, j;
        int err;
        struct mlx4_ib_iboe *iboe;
        int ib_num_ports = 0;
        int num_req_counters;
        int allocated;
        u32 counter_index;

        pr_info_once("%s", mlx4_ib_version);

        num_ports = 0;
        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;

        /* No point in registering a device with no ports... */
        if (num_ports == 0)
                return NULL;

        ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
        if (!ibdev) {
                dev_err(&dev->persist->pdev->dev,
                        "Device struct alloc failed\n");
                return NULL;
        }

        iboe = &ibdev->iboe;

        if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
                goto err_dealloc;

        if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
                goto err_pd;

        ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
                                 PAGE_SIZE);
        if (!ibdev->uar_map)
                goto err_uar;
        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

        ibdev->dev = dev;
        ibdev->bond_next_port = 0;

        strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
        ibdev->ib_dev.owner             = THIS_MODULE;
        ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
        ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
        ibdev->num_ports                = num_ports;
        ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
                                                1 : ibdev->num_ports;
        ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
        ibdev->ib_dev.dma_device        = &dev->persist->pdev->dev;
        ibdev->ib_dev.get_netdev        = mlx4_ib_get_netdev;
        ibdev->ib_dev.add_gid           = mlx4_ib_add_gid;
        ibdev->ib_dev.del_gid           = mlx4_ib_del_gid;

        if (dev->caps.userspace_caps)
                ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
        else
                ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

        ibdev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);

        ibdev->ib_dev.query_device      = mlx4_ib_query_device;
        ibdev->ib_dev.query_port        = mlx4_ib_query_port;
        ibdev->ib_dev.get_link_layer    = mlx4_ib_port_link_layer;
        ibdev->ib_dev.query_gid         = mlx4_ib_query_gid;
        ibdev->ib_dev.query_pkey        = mlx4_ib_query_pkey;
        ibdev->ib_dev.modify_device     = mlx4_ib_modify_device;
        ibdev->ib_dev.modify_port       = mlx4_ib_modify_port;
        ibdev->ib_dev.alloc_ucontext    = mlx4_ib_alloc_ucontext;
        ibdev->ib_dev.dealloc_ucontext  = mlx4_ib_dealloc_ucontext;
        ibdev->ib_dev.mmap              = mlx4_ib_mmap;
        ibdev->ib_dev.alloc_pd          = mlx4_ib_alloc_pd;
        ibdev->ib_dev.dealloc_pd        = mlx4_ib_dealloc_pd;
        ibdev->ib_dev.create_ah         = mlx4_ib_create_ah;
        ibdev->ib_dev.query_ah          = mlx4_ib_query_ah;
        ibdev->ib_dev.destroy_ah        = mlx4_ib_destroy_ah;
        ibdev->ib_dev.create_srq        = mlx4_ib_create_srq;
        ibdev->ib_dev.modify_srq        = mlx4_ib_modify_srq;
        ibdev->ib_dev.query_srq         = mlx4_ib_query_srq;
        ibdev->ib_dev.destroy_srq       = mlx4_ib_destroy_srq;
        ibdev->ib_dev.post_srq_recv     = mlx4_ib_post_srq_recv;
        ibdev->ib_dev.create_qp         = mlx4_ib_create_qp;
        ibdev->ib_dev.modify_qp         = mlx4_ib_modify_qp;
        ibdev->ib_dev.query_qp          = mlx4_ib_query_qp;
        ibdev->ib_dev.destroy_qp        = mlx4_ib_destroy_qp;
        ibdev->ib_dev.post_send         = mlx4_ib_post_send;
        ibdev->ib_dev.post_recv         = mlx4_ib_post_recv;
        ibdev->ib_dev.create_cq         = mlx4_ib_create_cq;
        ibdev->ib_dev.modify_cq         = mlx4_ib_modify_cq;
        ibdev->ib_dev.resize_cq         = mlx4_ib_resize_cq;
        ibdev->ib_dev.destroy_cq        = mlx4_ib_destroy_cq;
        ibdev->ib_dev.poll_cq           = mlx4_ib_poll_cq;
        ibdev->ib_dev.req_notify_cq     = mlx4_ib_arm_cq;
        ibdev->ib_dev.get_dma_mr        = mlx4_ib_get_dma_mr;
        ibdev->ib_dev.reg_user_mr       = mlx4_ib_reg_user_mr;
        ibdev->ib_dev.rereg_user_mr     = mlx4_ib_rereg_user_mr;
        ibdev->ib_dev.dereg_mr          = mlx4_ib_dereg_mr;
        ibdev->ib_dev.alloc_mr          = mlx4_ib_alloc_mr;
        ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
        ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
        ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
        ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
        ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
        ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
        ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;

        if (!mlx4_is_slave(ibdev->dev)) {
                ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
                ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
                ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
                ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
        }

        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
            dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
                ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
                ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
                ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

                ibdev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
                        (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
        }

        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
                ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
                ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
                ibdev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
                        (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
        }

        if (check_flow_steering_support(dev)) {
                ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
                ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
                ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;

                ibdev->ib_dev.uverbs_ex_cmd_mask |=
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
        }

        ibdev->ib_dev.uverbs_ex_cmd_mask |=
                (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ);

        mlx4_ib_alloc_eqs(dev, ibdev);

        spin_lock_init(&iboe->lock);

        if (init_node_data(ibdev))
                goto err_map;

        num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
        for (i = 0; i < num_req_counters; ++i) {
                mutex_init(&ibdev->qp1_proxy_lock[i]);
                allocated = 0;
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
                        err = mlx4_counter_alloc(ibdev->dev, &counter_index);
                        /* if failed to allocate a new counter, use default */
                        if (err)
                                counter_index =
                                        mlx4_get_default_counter_index(dev,
                                                                       i + 1);
                        else
                                allocated = 1;
                } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
                        counter_index = mlx4_get_default_counter_index(dev,
                                                                       i + 1);
                }
                ibdev->counters[i].index = counter_index;
                ibdev->counters[i].allocated = allocated;
                pr_info("counter index %d for port %d allocated %d\n",
                        counter_index, i + 1, allocated);
        }
        if (mlx4_is_bonded(dev))
                for (i = 1; i < ibdev->num_ports; ++i) {
                        ibdev->counters[i].index = ibdev->counters[0].index;
                        ibdev->counters[i].allocated = 0;
                }

        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_num_ports++;

        spin_lock_init(&ibdev->sm_lock);
        mutex_init(&ibdev->cap_mask_mutex);
        INIT_LIST_HEAD(&ibdev->qp_list);
        spin_lock_init(&ibdev->reset_flow_resource_lock);

        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
            ib_num_ports) {
                ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
                err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
                                            MLX4_IB_UC_STEER_QPN_ALIGN,
                                            &ibdev->steer_qpn_base, 0);
                if (err)
                        goto err_counter;

                ibdev->ib_uc_qpns_bitmap =
                        kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
                                sizeof(long),
                                GFP_KERNEL);
                if (!ibdev->ib_uc_qpns_bitmap) {
                        dev_err(&dev->persist->pdev->dev,
                                "bit map alloc failed\n");
                        goto err_steer_qp_release;
                }

                bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

                err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
                                dev, ibdev->steer_qpn_base,
                                ibdev->steer_qpn_base +
                                ibdev->steer_qpn_count - 1);
                if (err)
                        goto err_steer_free_bitmap;
        }

        for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
                atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

        if (ib_register_device(&ibdev->ib_dev, NULL))
                goto err_steer_free_bitmap;

        if (mlx4_ib_mad_init(ibdev))
                goto err_reg;

        if (mlx4_ib_init_sriov(ibdev))
                goto err_mad;

        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
                if (!iboe->nb.notifier_call) {
                        iboe->nb.notifier_call = mlx4_ib_netdev_event;
                        err = register_netdevice_notifier(&iboe->nb);
                        if (err) {
                                iboe->nb.notifier_call = NULL;
                                goto err_notif;
                        }
                }
        }

        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
                if (device_create_file(&ibdev->ib_dev.dev,
                                       mlx4_class_attributes[j]))
                        goto err_notif;
        }

        ibdev->ib_active = true;

        if (mlx4_is_mfunc(ibdev->dev))
                init_pkeys(ibdev);

        /* create paravirt contexts for any VFs which are active */
        if (mlx4_is_master(ibdev->dev)) {
                for (j = 0; j < MLX4_MFUNC_MAX; j++) {
                        if (j == mlx4_master_func_num(ibdev->dev))
                                continue;
                        if (mlx4_is_slave_active(ibdev->dev, j))
                                do_slave_init(ibdev, j, 1);
                }
        }
        return ibdev;

err_notif:
        if (ibdev->iboe.nb.notifier_call) {
                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                        pr_warn("failure unregistering notifier\n");
                ibdev->iboe.nb.notifier_call = NULL;
        }
        flush_workqueue(wq);

        mlx4_ib_close_sriov(ibdev);

err_mad:
        mlx4_ib_mad_cleanup(ibdev);

err_reg:
        ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
        kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
                mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                                      ibdev->steer_qpn_count);
err_counter:
        for (i = 0; i < ibdev->num_ports; ++i) {
                if (ibdev->counters[i].index != -1 &&
                    ibdev->counters[i].allocated)
                        mlx4_counter_free(ibdev->dev,
                                          ibdev->counters[i].index);
        }
err_map:
        iounmap(ibdev->uar_map);

err_uar:
        mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
        mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
        ib_dealloc_device(&ibdev->ib_dev);

        return NULL;
}

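/*
 * Helpers for the steerable UC QP range reserved in mlx4_ib_add():
 * hand out power-of-two aligned blocks of QP numbers from the bitmap,
 * and attach/detach the empty IB L2 rule that steers traffic to such
 * a QP.
 */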
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
        int offset;

        WARN_ON(!dev->ib_uc_qpns_bitmap);

        offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
                                         dev->steer_qpn_count,
                                         get_count_order(count));
        if (offset < 0)
                return offset;

        *qpn = dev->steer_qpn_base + offset;
        return 0;
}

void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
        if (!qpn ||
            dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
                return;

        BUG_ON(qpn < dev->steer_qpn_base);

        bitmap_release_region(dev->ib_uc_qpns_bitmap,
                              qpn - dev->steer_qpn_base,
                              get_count_order(count));
}

int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                         int is_attach)
{
        int err;
        size_t flow_size;
        struct ib_flow_attr *flow = NULL;
        struct ib_flow_spec_ib *ib_spec;

        if (is_attach) {
                flow_size = sizeof(struct ib_flow_attr) +
                            sizeof(struct ib_flow_spec_ib);
                flow = kzalloc(flow_size, GFP_KERNEL);
                if (!flow)
                        return -ENOMEM;
                flow->port = mqp->port;
                flow->num_of_specs = 1;
                flow->size = flow_size;
                ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
                ib_spec->type = IB_FLOW_SPEC_IB;
                ib_spec->size = sizeof(struct ib_flow_spec_ib);
                /* Add an empty rule for IB L2 */
                memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

                err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
                                            IB_FLOW_DOMAIN_NIC,
                                            MLX4_FS_REGULAR,
                                            &mqp->reg_id);
        } else {
                err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
        }
        kfree(flow);
        return err;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;

        ibdev->ib_active = false;
        flush_workqueue(wq);

        mlx4_ib_close_sriov(ibdev);
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
        if (ibdev->iboe.nb.notifier_call) {
                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                        pr_warn("failure unregistering notifier\n");
                ibdev->iboe.nb.notifier_call = NULL;
        }

        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                                      ibdev->steer_qpn_count);
                kfree(ibdev->ib_uc_qpns_bitmap);
        }

        iounmap(ibdev->uar_map);
        for (p = 0; p < ibdev->num_ports; ++p)
                if (ibdev->counters[p].index != -1 &&
                    ibdev->counters[p].allocated)
                        mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);

        mlx4_ib_free_eqs(dev, ibdev);

        mlx4_uar_free(dev, &ibdev->priv_uar);
        mlx4_pd_free(dev, ibdev->priv_pdn);
        ib_dealloc_device(&ibdev->ib_dev);
}

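/*
 * Queue per-port work that brings up (do_init = 1) or tears down
 * (do_init = 0) the tunnel QPs that paravirtualize a slave's special
 * QP traffic. Allocations are GFP_ATOMIC because this may be called
 * from event context.
 */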
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
        struct mlx4_ib_demux_work **dm = NULL;
        struct mlx4_dev *dev = ibdev->dev;
        int i;
        unsigned long flags;
        struct mlx4_active_ports actv_ports;
        unsigned int ports;
        unsigned int first_port;

        if (!mlx4_is_master(dev))
                return;

        actv_ports = mlx4_get_active_ports(dev, slave);
        ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
        first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

        dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
        if (!dm) {
                pr_err("failed to allocate memory for tunneling qp update\n");
                return;
        }

        for (i = 0; i < ports; i++) {
                dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
                if (!dm[i]) {
                        pr_err("failed to allocate memory for tunneling qp update work struct\n");
                        while (--i >= 0)
                                kfree(dm[i]);
                        goto out;
                }
                INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
                dm[i]->port = first_port + i + 1;
                dm[i]->slave = slave;
                dm[i]->do_init = do_init;
                dm[i]->dev = ibdev;
        }
        /* initialize or tear down tunnel QPs for the slave */
        spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
        if (!ibdev->sriov.is_going_down) {
                for (i = 0; i < ports; i++)
                        queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
        } else {
                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
                for (i = 0; i < ports; i++)
                        kfree(dm[i]);
        }
out:
        kfree(dm);
}

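/*
 * On a catastrophic device error, kick the completion handler once for
 * every CQ that belongs to a QP with a non-empty send or receive queue,
 * under the reset-flow lock so this cannot race with QP create/destroy.
 */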
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
        struct mlx4_ib_qp *mqp;
        unsigned long flags_qp;
        unsigned long flags_cq;
        struct mlx4_ib_cq *send_mcq, *recv_mcq;
        struct list_head cq_notify_list;
        struct mlx4_cq *mcq;
        unsigned long flags;

        pr_warn("mlx4_ib_handle_catas_error started\n");
        INIT_LIST_HEAD(&cq_notify_list);

        /* Go over the qp list residing on that ibdev, sync with create/destroy qp */
        spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

        list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
                spin_lock_irqsave(&mqp->sq.lock, flags_qp);
                if (mqp->sq.tail != mqp->sq.head) {
                        send_mcq = to_mcq(mqp->ibqp.send_cq);
                        spin_lock_irqsave(&send_mcq->lock, flags_cq);
                        if (send_mcq->mcq.comp &&
                            mqp->ibqp.send_cq->comp_handler) {
                                if (!send_mcq->mcq.reset_notify_added) {
                                        send_mcq->mcq.reset_notify_added = 1;
                                        list_add_tail(&send_mcq->mcq.reset_notify,
                                                      &cq_notify_list);
                                }
                        }
                        spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
                }
                spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
                /* Now, handle the QP's receive queue */
                spin_lock_irqsave(&mqp->rq.lock, flags_qp);
                /* no handling is needed for SRQ */
                if (!mqp->ibqp.srq) {
                        if (mqp->rq.tail != mqp->rq.head) {
                                recv_mcq = to_mcq(mqp->ibqp.recv_cq);
                                spin_lock_irqsave(&recv_mcq->lock, flags_cq);
                                if (recv_mcq->mcq.comp &&
                                    mqp->ibqp.recv_cq->comp_handler) {
                                        if (!recv_mcq->mcq.reset_notify_added) {
                                                recv_mcq->mcq.reset_notify_added = 1;
                                                list_add_tail(&recv_mcq->mcq.reset_notify,
                                                              &cq_notify_list);
                                        }
                                }
                                spin_unlock_irqrestore(&recv_mcq->lock,
                                                       flags_cq);
                        }
                }
                spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
        }

        list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
                mcq->comp(mcq);
        }
        spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
        pr_warn("mlx4_ib_handle_catas_error ended\n");
}

static void handle_bonded_port_state_event(struct work_struct *work)
{
        struct ib_event_work *ew =
                container_of(work, struct ib_event_work, work);
        struct mlx4_ib_dev *ibdev = ew->ib_dev;
        enum ib_port_state bonded_port_state = IB_PORT_NOP;
        int i;
        struct ib_event ibev;

        kfree(ew);
        spin_lock_bh(&ibdev->iboe.lock);
        for (i = 0; i < MLX4_MAX_PORTS; ++i) {
                struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
                enum ib_port_state curr_port_state;

                if (!curr_netdev)
                        continue;

                curr_port_state =
                        (netif_running(curr_netdev) &&
                         netif_carrier_ok(curr_netdev)) ?
                        IB_PORT_ACTIVE : IB_PORT_DOWN;

                bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
                        curr_port_state : IB_PORT_ACTIVE;
        }
        spin_unlock_bh(&ibdev->iboe.lock);

        ibev.device = &ibdev->ib_dev;
        ibev.element.port_num = 1;
        ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
                IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

        ib_dispatch_event(&ibev);
}

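/*
 * Dispatch core driver events to the IB core. Port up/down under
 * bonding is folded into a single logical port-1 event computed from
 * both slaves' carrier state; port management changes and slave
 * init/shutdown are deferred to the workqueue where required.
 */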
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                          enum mlx4_dev_event event, unsigned long param)
{
        struct ib_event ibev;
        struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
        struct mlx4_eqe *eqe = NULL;
        struct ib_event_work *ew;
        int p = 0;

        if (mlx4_is_bonded(dev) &&
            ((event == MLX4_DEV_EVENT_PORT_UP) ||
             (event == MLX4_DEV_EVENT_PORT_DOWN))) {
                ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
                if (!ew)
                        return;
                INIT_WORK(&ew->work, handle_bonded_port_state_event);
                ew->ib_dev = ibdev;
                queue_work(wq, &ew->work);
                return;
        }

        if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
                eqe = (struct mlx4_eqe *)param;
        else
                p = (int) param;

        switch (event) {
        case MLX4_DEV_EVENT_PORT_UP:
                if (p > ibdev->num_ports)
                        return;
                if (mlx4_is_master(dev) &&
                    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
                        IB_LINK_LAYER_INFINIBAND) {
                        mlx4_ib_invalidate_all_guid_record(ibdev, p);
                }
                ibev.event = IB_EVENT_PORT_ACTIVE;
                break;

        case MLX4_DEV_EVENT_PORT_DOWN:
                if (p > ibdev->num_ports)
                        return;
                ibev.event = IB_EVENT_PORT_ERR;
                break;

        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
                ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                mlx4_ib_handle_catas_error(ibdev);
                break;

        case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
                ew = kmalloc(sizeof *ew, GFP_ATOMIC);
                if (!ew) {
                        pr_err("failed to allocate memory for events work\n");
                        break;
                }

                INIT_WORK(&ew->work, handle_port_mgmt_change_event);
                memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
                ew->ib_dev = ibdev;
                /* need to queue only for port owner, which uses GEN_EQE */
                if (mlx4_is_master(dev))
                        queue_work(wq, &ew->work);
                else
                        handle_port_mgmt_change_event(&ew->work);
                return;

        case MLX4_DEV_EVENT_SLAVE_INIT:
                /* here, p is the slave id */
                do_slave_init(ibdev, p, 1);
                if (mlx4_is_master(dev)) {
                        int i;

                        for (i = 1; i <= ibdev->num_ports; i++) {
                                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
                                    == IB_LINK_LAYER_INFINIBAND)
                                        mlx4_ib_slave_alias_guid_event(ibdev,
                                                                       p, i,
                                                                       1);
                        }
                }
                return;

        case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
                if (mlx4_is_master(dev)) {
                        int i;

                        for (i = 1; i <= ibdev->num_ports; i++) {
                                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
                                    == IB_LINK_LAYER_INFINIBAND)
                                        mlx4_ib_slave_alias_guid_event(ibdev,
                                                                       p, i,
                                                                       0);
                        }
                }
                /* here, p is the slave id */
                do_slave_init(ibdev, p, 0);
                return;

        default:
                return;
        }

        ibev.device           = ibdev_ptr;
        ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

        ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
        .add            = mlx4_ib_add,
        .remove         = mlx4_ib_remove,
        .event          = mlx4_ib_event,
        .protocol       = MLX4_PROT_IB_IPV6,
        .flags          = MLX4_INTFF_BONDING
};

static int __init mlx4_ib_init(void)
{
        int err;

        wq = create_singlethread_workqueue("mlx4_ib");
        if (!wq)
                return -ENOMEM;

        err = mlx4_ib_mcg_init();
        if (err)
                goto clean_wq;

        err = mlx4_register_interface(&mlx4_ib_interface);
        if (err)
                goto clean_mcg;

        return 0;

clean_mcg:
        mlx4_ib_mcg_destroy();

clean_wq:
        destroy_workqueue(wq);
        return err;
}

static void __exit mlx4_ib_cleanup(void)
{
        mlx4_unregister_interface(&mlx4_ib_interface);
        mlx4_ib_mcg_destroy();
        destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);