/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

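/*
 * Issue the MAD_IFC firmware command: hand in_mad (plus the receive-side
 * work-completion and GRH info, when available) to the device for
 * processing and copy the 256-byte response into response_mad.  The
 * op_modifier bits control whether M_Key/B_Key checks are skipped and
 * whether the extended work-completion block is passed in the mailbox.
 */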
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	if (in_wc) {
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
			   in_modifier, op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

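/*
 * Refresh the cached address handle used to reach the subnet manager on
 * port_num, destroying any previous one under sm_lock.
 */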
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_CLIENT_REREGISTER);

			if (prev_lid != lid)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_LID_CHANGE);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			mlx4_ib_dispatch_event(dev, port_num,
					       IB_EVENT_PKEY_CHANGE);
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			break;
		default:
			break;
		}
}

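/*
 * Replace the data of NodeDescription GetResp SMPs with the node_desc
 * currently stored in the ib_device, under sm_lock.
 */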
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

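/*
 * Re-send a trap MAD to the subnet manager using the cached SM address
 * handle.  The send buffer is freed in send_handler() once the send
 * completes, or immediately here if posting fails.
 */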
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

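/*
 * Process a MAD arriving on an InfiniBand link-layer port: filter out
 * methods and classes the firmware agents cannot handle, pass the rest
 * to the device via mlx4_MAD_IFC(), snoop SM sets to generate port
 * events, and override the node description in responses.
 */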
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

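/*
 * Convert the raw mlx4 flow counters into IB PMA PortCounters fields.
 * PortXmitData/PortRcvData are specified in units of 32-bit words, hence
 * the divide-by-four of the byte counts.
 */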
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	pma_cnt->port_xmit_data    = cpu_to_be32((be64_to_cpu(cnt->tx_bytes) >> 2));
	pma_cnt->port_rcv_data     = cpu_to_be32((be64_to_cpu(cnt->rx_bytes) >> 2));
	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
}

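/*
 * Handle performance-management MADs on an Ethernet (RoCE) port by reading
 * the port's hardware counters with MLX4_CMD_QUERY_IF_STAT and reporting
 * them in PMA PortCounters format.
 */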
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}

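/*
 * Top-level ib_device MAD hook: dispatch to the InfiniBand or RoCE handler
 * according to the port's link layer.
 */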
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

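/*
 * Register send-only MAD agents (SMI and GSI) for every InfiniBand
 * link-layer port; these provide the send_agent entries used by
 * update_sm_ah() and forward_trap().
 */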
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

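/*
 * Work handler for port-management-change EQEs (devices with
 * MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV, which do not rely on SMP snooping):
 * refresh the SM address handle and dispatch the corresponding
 * LID/GID/P_Key/client-reregister events.
 */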
void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		   the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			mlx4_ib_dispatch_event(dev, port,
					       IB_EVENT_CLIENT_REREGISTER);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}

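/*
 * CQ completion callback for the paravirtualized tunnel/proxy QPs: kick the
 * per-context work item unless the SR-IOV demux layer is being torn down.
 */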
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

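/*
 * (Re)post receive buffer 'index' of a tunnel or proxy QP.  The WR ID
 * encodes the ring index, the receive flag, and the proxy QP type so the
 * completion handler can locate the buffer again.
 */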
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

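/*
 * Allocate and DMA-map the receive ring and TX buffer ring for one
 * paravirtualized QP (SMI or GSI).  Tunnel QPs and proxy/special QPs use
 * different buffer layouts, selected by 'is_tun'.
 */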
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

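/*
 * Unmap and free the rings allocated by mlx4_ib_alloc_pv_bufs(), destroying
 * any address handles still attached to pending TX buffers.
 */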
static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	/* dummy until next patch in series */
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that!  He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}

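/*
 * Create one paravirtualized QP (a UD tunnel QP when create_tun is set,
 * otherwise a special SMI/GSI proxy QP), transition it through
 * INIT/RTR/RTS, and post its full receive ring.
 */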
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
				    IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index =
		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err(" mlx4_ib_post_pv_buf error"
			       " (err = %d, i = %d)\n", ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}

/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	/* dummy until next patch in series */
}

static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context "
		       "for port %d, slave %d\n", port, slave);
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}

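/*
 * Build the full set of paravirtualization resources for one slave/port
 * context: rx/tx buffers, CQ, PD, DMA MR, and the SMI (port owner only)
 * and GSI QPs; finally arm the CQ and mark the context active.
 */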
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only on port owner, and only if link layer is IB */
	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
	if (ctx->has_smi)
		cq_size *= 2;

	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
			       NULL, ctx, cq_size, 0);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(ctx->mr)) {
		ret = PTR_ERR(ctx->mr);
		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
		goto err_pd;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_mr;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_mr:
	ib_dereg_mr(ctx->mr);
	ctx->mr = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}

static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dereg_mr(ctx->mr);
		ctx->mr = NULL;
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

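/*
 * Bring up or tear down the tunnel QP resources for one slave on one port;
 * for the master function this also covers the real (proxy) SQP resources.
 */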
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}

void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
	return;
}

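/*
 * Allocate the per-port demux context on the master: one pv context slot
 * per possible slave plus the tunnelling and up/down workqueues.
 */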
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
				   struct mlx4_ib_demux_ctx *ctx,
				   int port)
{
	char name[12];
	int ret = 0;
	int i;

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
	if (!ctx->tun)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->port = port;
	ctx->ib_dev = &dev->ib_dev;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
		if (ret) {
			ret = -ENOMEM;
			goto err_wq;
		}
	}

	snprintf(name, sizeof name, "mlx4_ibt%d", port);
	ctx->wq = create_singlethread_workqueue(name);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wq;
	}

	snprintf(name, sizeof name, "mlx4_ibud%d", port);
	ctx->ud_wq = create_singlethread_workqueue(name);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_udwq;
	}

	return 0;

err_udwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}

static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dereg_mr(sqp_ctx->mr);
		sqp_ctx->mr = NULL;
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;
	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wq);
	}
}

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
	return;
}

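/*
 * Initialize SR-IOV support: a no-op on single-function devices, tunnel
 * mode only on slaves; on the master, set up the per-port demux contexts,
 * the real SQP contexts, and the master's own tunnel QPs.
 */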
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto demux_err;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

demux_err:
	while (i > 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		--i;
	}

	return err;
}

void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev))
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}
}