/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
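
/*
 * Example (illustrative sketch, not part of the original file): a GSI
 * client handling Performance Management MADs might register roughly as
 * below. The handlers "my_send_done" and "my_recv_done" and the context
 * pointer "my_ctx" are hypothetical, caller-supplied names.
 *
 *	struct ib_mad_reg_req req = {};
 *	struct ib_mad_agent *agent;
 *
 *	req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_done, my_recv_done, my_ctx, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */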

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
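
/*
 * Example (sketch, not from the original file): passively observing all
 * MADs received on the SMI QP; "my_snoop_recv" and "my_ctx" are
 * hypothetical caller-supplied names.
 *
 *	struct ib_mad_agent *snoop;
 *
 *	snoop = ib_register_mad_snoop(device, port_num, IB_QPT_SMI,
 *				      IB_MAD_SNOOP_RECVS, NULL,
 *				      my_snoop_recv, my_ctx);
 *	if (IS_ERR(snoop))
 *		return PTR_ERR(snoop);
 */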

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		    IB_LID_PERMISSIVE &&
		    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		    IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_id, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
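
/*
 * Example (illustrative sketch only): building and posting a single
 * non-RMPP MAD on an agent registered above. "agent", "ah" and
 * "remote_qpn" are assumed to exist in the caller's context.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, 0, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 2;
 *	... fill in msg->mad, starting with the struct ib_mad_hdr ...
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */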

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
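
/*
 * Example (sketch): filling the payload of an RMPP send buffer one
 * segment at a time, assuming "src" holds msg->seg_count * msg->seg_size
 * bytes. Segment numbers start at 1.
 *
 *	for (i = 1; i <= msg->seg_count; i++)
 *		memcpy(ib_get_rmpp_segment(msg, i),
 *		       src + (i - 1) * msg->seg_size, msg->seg_size);
 */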

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
1326
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1328 u8 rmpp_version,
1329 ib_mad_send_handler send_handler,
1330 ib_mad_recv_handler recv_handler,
1331 void *context)
1332{
1333 return ERR_PTR(-EINVAL); /* XXX: for now */
1334}
1335EXPORT_SYMBOL(ib_redirect_mad_qp);
1336
1337int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1338 struct ib_wc *wc)
1339{
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001340 dev_err(&mad_agent->device->dev,
1341 "ib_process_mad_wc() not implemented yet\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 return 0;
1343}
1344EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
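
/*
 * The registration tables below form a per-port lookup tree keyed by
 * class version, management class, and method, roughly:
 *
 *	port_priv->version[mgmt_class_version].class
 *		->method_table[mgmt_class]
 *			->agent[method]
 *
 * with a parallel "vendor" branch for vendor classes in range 2 that
 * is additionally keyed by OUI.  Tables are allocated lazily on first
 * registration and freed when the last agent referencing them goes
 * away.
 */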

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
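
/*
 * Registration sketch (illustrative only): a kernel client reaches
 * add_nonoui_reg_req()/add_oui_reg_req() via ib_register_mad_agent()
 * with a filled-in ib_mad_reg_req, for example:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *
 * method_mask marks which unsolicited methods should be routed to this
 * agent; agents that only send requests and consume responses may pass
 * a NULL ib_mad_reg_req instead.
 */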

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    IB_MGMT_MAX_METHODS)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
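
/*
 * Response routing relies on the convention that the upper 32 bits of
 * a request TID identify the sending agent (agent.hi_tid, assigned at
 * registration time), while the caller owns the lower 32 bits.  A
 * response whose TID carries an unknown hi_tid finds no agent here and
 * the MAD is dropped.
 */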

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
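
/*
 * A response is matched to its request by TID and management class,
 * plus a GID/path-bits check (rcv_has_same_gid) so that two peers that
 * happen to reuse the same TID over different paths cannot satisfy
 * each other's requests; directed-route SMPs skip the path check
 * because they may carry permissive LIDs.
 */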

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_recv_wc->wc->wr_id = 0;
				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
								   mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
							   mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
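
/*
 * For directed-route SMPs, IB_SMI_HANDLE means "continue processing
 * this MAD locally on this port", while IB_SMI_DISCARD means the
 * packet was invalid, not addressed to this node, or (on a switch)
 * has already been forwarded out another port via
 * agent_send_response().
 */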

static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
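
/*
 * Unmatched Get/Set requests are bounced back as a GetResp carrying
 * the "unsupported method/attribute" status rather than silently
 * dropped, so senders see an immediate error status instead of waiting
 * out a timeout; all other unmatched methods are simply discarded by
 * the caller.
 */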

static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				       rdma_cap_ib_switch(port_priv->device),
				       port_num,
				       port_priv->device->phys_port_cnt) ==
				       IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       rdma_cap_ib_switch(port_priv->device),
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"ib_mad_recv_done_handler no memory for response buffer\n");
		goto out;
	}

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
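
/*
 * Receive path summary: a completed receive is validated, offered to
 * the SMI code (directed-route handling), then to the driver's
 * process_mad() hook, and only then dispatched to a registered agent;
 * whichever buffer is left over (recv, or the preallocated response)
 * is reposted to the receive queue rather than freed and reallocated.
 */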

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
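
/*
 * The wait list is kept sorted by absolute timeout, earliest at the
 * head, so the agent's delayed work item only needs to be rescheduled
 * when a newly inserted entry becomes the new head.
 */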

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
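
/*
 * mad_send_wr->refcount counts the outstanding send work request plus
 * an expected response (when a timeout was requested).  The send is
 * completed toward the client only once the count drops to zero; a
 * send that still expects a response is parked on the wait list
 * instead.
 */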

static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"mad_error_handler - ib_modify_qp to RTS : %d\n",
					ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
2615
2616static struct ib_mad_send_wr_private*
Sean Hefty34816ad2005-10-25 10:51:39 -07002617find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2618 struct ib_mad_send_buf *send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619{
2620 struct ib_mad_send_wr_private *mad_send_wr;
2621
2622 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2623 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002624 if (&mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 return mad_send_wr;
2626 }
2627
2628 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2629 agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04002630 if (is_rmpp_data_mad(mad_agent_priv,
2631 mad_send_wr->send_buf.mad) &&
Sean Hefty34816ad2005-10-25 10:51:39 -07002632 &mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 return mad_send_wr;
2634 }
2635 return NULL;
2636}
2637
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

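/*
 * Deliver completions for MADs that were processed locally instead of
 * being placed on the wire (typically directed route SMPs handled on
 * the local port).  Each queued entry first produces a synthesized
 * receive on the destination agent, using a work completion built by
 * build_smp_wc(), and then a successful send completion on the
 * originating agent.
 */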
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

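/*
 * Re-post a timed out send if it has retries left.  For RMPP-aware
 * agents, ib_retry_rmpp() gets the first chance to consume the retry;
 * anything it leaves unhandled is simply posted again via
 * ib_send_mad().
 */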
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

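/*
 * Delayed-work handler for response timeouts.  The wait list is kept
 * sorted by expiry, so the scan stops at the first request that has not
 * timed out yet and re-arms the delayed work for that moment.
 */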
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

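/*
 * CQ completion callback.  This runs in interrupt context, so it only
 * kicks ib_mad_completion_handler() on the port workqueue; the port
 * list check guards against completions racing with port removal.
 */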
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

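/*
 * Catastrophic QP errors land here.  There is no recovery path, so the
 * event is only logged.
 */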
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

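/*
 * Create one of the two special QPs for a port: qp_info[0] backs the
 * SMI (QP0) and qp_info[1] the GSI (QP1).  Both share the port CQ, and
 * the queue depths come from the send_queue_size and recv_queue_size
 * module parameters.
 */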
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;
	struct ib_cq_init_attr cq_attr = {};

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, &cq_attr);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

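/*
 * Client "add" callback: bring up MAD services on every port of a newly
 * registered device that supports them.  On failure, any ports already
 * opened are torn down again in reverse order.
 */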
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

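/*
 * Module init clamps the queue-size parameters to the supported range
 * before registering with the core.  Assuming this code is built as the
 * historical ib_mad module, loading with larger queues might look like
 * (sketch):
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 */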
static int __init ib_mad_init_module(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);