/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

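/*
 * Tunnel QP work request IDs encode three fields: bit 34 flags a receive
 * completion (MLX4_TUN_WRID_RECV), bits 32-33 carry the proxy QP type
 * (0 = SMI, 1 = GSI), and the low bits hold the ring buffer index.
 * For example, a receive posted on the GSI tunnel ring at slot 5 uses
 *	wr_id = 5 | MLX4_TUN_WRID_RECV | MLX4_TUN_SET_WRID_QPN(IB_QPT_GSI);
 */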
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

 /* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

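/*
 * TIDs generated here for demuxed transactions get their most significant
 * byte forced to 0xff (255 indicates dom0), with the low bits taken from a
 * per-context atomic counter.  For slave requests, mlx4_ib_multiplex_mad()
 * below writes the slave id into that same TID byte, which is how
 * mlx4_ib_demux_mad() later routes a response back to its originator.
 */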
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

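/*
 * Execute a MAD_IFC firmware command.  As encoded by the checks below,
 * op_modifier bit 0x1 skips the M_Key check, 0x2 skips the B_Key check,
 * 0x4 says the mailbox carries extended work-completion info built from
 * *in_wc, and 0x8 requests "network view" and forces the command down
 * the native (non-wrapped) path on multi-function devices.
 */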
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

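/*
 * Cache an address handle pointing at the subnet manager for this port.
 * forward_trap() below reuses dev->sm_ah[] when it re-sends traps, so the
 * AH is swapped under sm_lock to keep that path consistent.
 */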
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
								    (u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
								     (u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			if (have_event)
				break;
		}
	}
}

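/*
 * Responses to a NodeDescription query are patched with the node_desc
 * string stored in the ib_device, so a locally configured NodeDesc wins
 * over whatever the firmware reported.  sm_lock apparently guards the
 * same data that the node_desc writer updates.
 */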
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

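/*
 * Re-send a trap that the local SMA generated (it arrives with SLID 0)
 * to the subnet manager, using the AH cached by update_sm_ah().  QP 0 is
 * used for LID-routed SMP traps, QP 1 for everything else.
 */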
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (strictly
		 * speaking this violates the IB spec, but we know it's
		 * OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}

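/*
 * P_Keys come in full-membership (bit 15 set) and limited-membership
 * pairs that share the low 15 bits.  Given the physical index of one of
 * them, look up the index of its partner in the cached table, so the
 * caller can match a slave's virtual P_Key against either form.
 */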
static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
				 u8 *full_pk_ix, u8 *partial_pk_ix,
				 int *is_full_member)
{
	u16 search_pkey;
	int fm;
	int err = 0;
	u16 pk;

	err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
	if (err)
		return err;

	fm = (search_pkey & 0x8000) ? 1 : 0;
	if (fm) {
		*full_pk_ix = ph_pkey_ix;
		search_pkey &= 0x7FFF;
	} else {
		*partial_pk_ix = ph_pkey_ix;
		search_pkey |= 0x8000;
	}

	if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
		pk = 0xFFFF;

	if (fm)
		*partial_pk_ix = (pk & 0xFF);
	else
		*full_pk_ix = (pk & 0xFF);

	*is_full_member = fm;
	return err;
}

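/*
 * Tunnel a MAD that arrived on the wire to a slave: wrap the GRH and MAD
 * in a mlx4_rcv_tunnel_mad together with the original work-completion
 * details (pkey index, SL, SLID, source QP), and post it as an ordinary
 * UD send to the slave's proxy (tunnel) QP, whose number is derived from
 * the per-slave block of eight QPs starting at caps.sqp_start.
 */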
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	int i;
	int is_full_member = 0;
	u16 tun_pkey_ix;
	u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute the pkey index for the slave: first translate the wc's
	 * (virtualized Dom0) pkey index to a physical pkey index */
	if (dest_qpt) {
		ph_pkey_ix =
			dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];

		/* now, translate this to the slave pkey index */
		ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
					    &partial_pk_ix, &is_full_member);
		if (ret)
			return -EINVAL;

		for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
			if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
			    (is_full_member &&
			     (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
				break;
		}
		if (i == dev->dev->caps.pkey_table_len[port])
			return -EINVAL;
		tun_pkey_ix = i;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->caps.sqp_start + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force-loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate a tunnel tx buffer slot; fail with -EAGAIN if the ring is full */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
	tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

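/*
 * Decide which slave an incoming wire MAD belongs to.  Responses are
 * routed by the slave id stashed in the TID's top byte, GRH traffic by
 * matching the destination GID against the per-slave guid_cache, and a
 * few classes (SA, CM, device management) run class-specific demux
 * handlers before the MAD is tunneled to the chosen slave.
 */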
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	int slave;
	u8 *slave_id;

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255)	/* 255 indicates the dom0 */
			*slave_id = 0;	/* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* make sure no out-of-range slave id (e.g. an unhandled 255) slips through */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

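/*
 * Translate mlx4 hardware counters into the PMA PortCounters layout.
 * The PMA xmit/rcv "data" fields count 32-bit words rather than bytes,
 * hence the divide-by-four of the byte counters below.
 */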
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	pma_cnt->port_xmit_data    = cpu_to_be32((be64_to_cpu(cnt->tx_bytes) >> 2));
	pma_cnt->port_rcv_data     = cpu_to_be32((be64_to_cpu(cnt->rx_bytes) >> 2));
	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
}

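/*
 * On an Ethernet (RoCE) link layer there is no SMA, so the only MADs
 * served locally are performance-management queries, answered by reading
 * the port's hardware counter set via the QUERY_IF_STAT firmware command.
 */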
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}

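/*
 * A GUID-info change EQE covers a 32-entry region of the GUID table, but
 * one GuidInfo SMP only carries GUID_TBL_BLK_NUM_ENTRIES (8) entries;
 * hence the block number is scaled by 4 and up to four GET queries are
 * issued, skipping sub-blocks whose 8 bits of the change bitmap are clear.
 */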
static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	u16 i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
		goto out;
	}

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version  = 1;
		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method        = IB_MGMT_METHOD_GET;
		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}

void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		   the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/* if master, notify all slaves */
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/* if master, notify relevant slaves */
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

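/*
 * Each slave owns a block of eight proxy QPs starting at
 * caps.sqp_start + 8 * slave; the first two of them (apparently one per
 * port, given the bit-0 port check in mlx4_ib_multiplex_mad()) are the
 * QP0 proxies, which is what this predicate tests.
 */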
static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int slave_start = dev->dev->caps.sqp_start + 8 * slave;

	return (qpn >= slave_start && qpn <= slave_start + 1);
}

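/*
 * Send a slave's MAD out on the real wire via the master's special QPs.
 * The pkey index is translated through the slave's virt2phys table, the
 * AH is created with sgid_index 0 and the slave's real gid index is then
 * patched into the mlx4 AV, and the force-loopback bit is cleared so the
 * packet actually goes out on the wire.
 */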
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.pkey_index = wire_pkey_ix;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.next = NULL;
	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

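/*
 * Handle a MAD a slave posted on its tunnel QP: sanity-check that the
 * source QP really is that slave's proxy for this port, stamp the slave
 * id into the TID of request methods, run the class-specific multiplex
 * handlers, rebuild an address handle from the tunneled mlx4_av, and
 * finally push the MAD onto the wire with mlx4_ib_send_to_wire().
 */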
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->caps.sqp_start ||
	    wc->src_qp >= dev->dev->caps.base_tunnel_sqpn ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->caps.sqp_start) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}
	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "non-master trying to send QP0 packets\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;
	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if ((ah_attr.ah_flags & IB_AH_GRH) &&
	    (ah_attr.grh.sgid_index != slave)) {
		mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
			     slave, ah_attr.grh.sgid_index);
		return;
	}

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, &tunnel->mad);
}

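/*
 * Allocate and DMA-map the receive ring and the send ring for one proxy
 * QP (MLX4_NUM_TUNNEL_BUFS entries each).  Tunnel QPs use the larger
 * mlx4_tunnel_mad / mlx4_rcv_tunnel_mad layouts, while the real wire QPs
 * use the plain mlx4_mad_rcv_buf / mlx4_mad_snd_buf layouts.
 */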
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);

				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that!  He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}

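/*
 * Create one proxy QP (tunnel flavor, or the real special-QP flavor when
 * create_tun is clear) and walk it through the INIT -> RTR -> RTS
 * transitions, then prime its receive ring.  Note that tunnel QPs are
 * plain IB_QPT_UD QPs created with the MLX4_IB_SRIOV_TUNNEL_QP flag.
 */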
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index =
		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err(" mlx4_ib_post_pv_buf error"
			       " (err = %d, i = %d)\n", ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}

1554/*
1555 * IB MAD completion callback for real SQPs
1556 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_TUNNEL_BUFS - 1)))
					pr_err("Failed reposting SQP buf:%lld\n",
					       wc.wr_id);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error on real SQP (slave %d): status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}

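/*
 * Allocate (or free) the per-slave, per-port demux context used by the
 * tunnel machinery.  Only the device/port/slave identity is wired up
 * here; the QPs, CQ, PD and MR are created later by
 * create_pv_resources().
 */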
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context for port %d, slave %d\n",
		       port, slave);
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}

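/*
 * Create the full set of para-virtualization resources for one slave
 * on one port: receive/send buffers, a shared CQ, a PD, a DMA MR and
 * the QPs themselves (QP0 only where this context owns an IB SMI).
 * With create_tun set, tunnel QPs are built; otherwise the master's
 * real special QPs are built.
 */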
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only on port owner, and only if link layer is IB */
	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

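	/*
	 * One CQ serves both the send and the receive queue of QP1
	 * (hence 2 * MLX4_NUM_TUNNEL_BUFS), and twice that when a QP0
	 * shares the same CQ.
	 */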
	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
	if (ctx->has_smi)
		cq_size *= 2;

	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
			       NULL, ctx, cq_size, 0);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(ctx->mr)) {
		ret = PTR_ERR(ctx->mr);
		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
		goto err_pd;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_mr;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_mr:
	ib_dereg_mr(ctx->mr);
	ctx->mr = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}

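/*
 * Tear down everything create_pv_resources() built, in reverse order.
 * With flush set, pending work on the context's workqueue is drained
 * first so no completion handler races with the teardown.
 */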
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dereg_mr(ctx->mr);
		ctx->mr = NULL;
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

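/*
 * Bring tunnel (and, for the master, real special) QP resources for
 * one slave/port up or down.  do_init selects the direction; teardown
 * also drops the slave's multicast registrations first.
 */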
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}

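/*
 * Deferred-work wrapper around mlx4_ib_tunnels_update().  Callers
 * allocate a struct mlx4_ib_demux_work, fill in dev/slave/port/do_init
 * and queue it; the work struct is freed here once the update is done.
 */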
void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
}

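/*
 * Set up the master's per-port demux context: one pv context slot per
 * possible qp1 client (dev->dev->caps.sqp_demux), the mcg
 * para-virtualization state, and two single-threaded workqueues (one
 * for tunnelling, one for port up/down events).
 */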
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
				   struct mlx4_ib_demux_ctx *ctx,
				   int port)
{
	char name[12];
	int ret = 0;
	int i;

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
	if (!ctx->tun)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->port = port;
	ctx->ib_dev = &dev->ib_dev;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
		if (ret) {
			ret = -ENOMEM;
			goto err_mcg;
		}
	}

	ret = mlx4_ib_mcg_port_init(ctx);
	if (ret) {
		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
		goto err_mcg;
	}

	snprintf(name, sizeof name, "mlx4_ibt%d", port);
	ctx->wq = create_singlethread_workqueue(name);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wq;
	}

	snprintf(name, sizeof name, "mlx4_ibud%d", port);
	ctx->ud_wq = create_singlethread_workqueue(name);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_udwq;
	}

	return 0;

err_udwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}

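/*
 * Destroy the master's real special QP context.  Mirrors
 * destroy_pv_resources(), but always flushes the workqueue and frees
 * the buffers as non-tunnel (create_tun == 0) allocations.
 */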
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dereg_mr(sqp_ctx->mr);
		sqp_ctx->mr = NULL;
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

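/*
 * Undo mlx4_ib_alloc_demux_ctx(): mark every slave context as going
 * down before flushing the workqueue, then release the per-slave
 * resources and the workqueues themselves.
 */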
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;
	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wq);
	}
}

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
}

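/*
 * Top-level SR-IOV MAD demux initialization.  On any multi-function
 * device this sets up CM para-virtualization; on the master it also
 * starts the alias GUID service, registers sysfs entries, caches the
 * port GUID for each port, allocates the per-port demux and real-SQP
 * contexts, and finally brings up the master's own tunnel QPs.
 */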
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
		goto paravirt_err;
	}
	err = mlx4_ib_device_register_sysfs(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
		goto sysfs_err;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		union ib_gid gid;
		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto demux_err;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

demux_err:
	while (i > 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		--i;
	}
	mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}

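/*
 * SR-IOV MAD demux teardown: flag the device as going down (under the
 * lock that serializes with the event handlers), then, on the master,
 * drain the up/down workqueues and release every per-port context and
 * para-virtualization service in reverse order of creation.
 */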
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
		mlx4_ib_device_unregister_sysfs(dev);
	}
}