/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * The caller must hold ib_mad_port_list_lock.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port; takes ib_mad_port_list_lock itself.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
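
/*
 * Illustrative sketch (not part of the original file): a client receive
 * handler might use ib_response_mad() to split solicited responses from
 * unsolicited requests.  The handler name and the handle_response()/
 * handle_request() helpers are hypothetical.
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		const struct ib_mad_hdr *hdr =
 *			&mad_recv_wc->recv_buf.mad->mad_hdr;
 *
 *		if (ib_response_mad(hdr))
 *			handle_response(agent, mad_recv_wc);
 *		else
 *			handle_request(agent, mad_recv_wc);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */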

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet
	 * devices will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
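
/*
 * Illustrative sketch (not part of the original file): registering a GSI
 * agent for a vendor class.  The OUI, method, handler names, and context
 * are assumptions for the example, not values taken from this file.
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_VENDOR_RANGE2_START,
 *		.mgmt_class_version = 1,
 *		.oui = { 0x00, 0x14, 0x05 },
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */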

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		dev_err(&device->dev, "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
						&mad_priv->mad.mad.mad_hdr);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
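
/*
 * Worked example (illustrative, not part of the original file): for an
 * RMPP SA MAD, hdr_len is IB_MGMT_SA_HDR (56), so each 256-byte MAD
 * carries 256 - 56 = 200 data bytes per segment.  With data_len = 500,
 * pad = 200 - (500 % 200) = 100, filling out the third and final segment.
 */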

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof(*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
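
/*
 * Illustrative note (not part of the original file): RMPP is handled in
 * the kernel only for agents registered with a nonzero rmpp_version and
 * without IB_MAD_USER_RMPP in registration_flags.  A sketch of the two
 * cases, other arguments elided:
 *
 *	kernel-managed:	ib_register_mad_agent(..., IB_MGMT_RMPP_VERSION,
 *					      ..., 0);
 *	user-managed:	ib_register_mad_agent(..., IB_MGMT_RMPP_VERSION,
 *					      ..., IB_MAD_USER_RMPP);
 *
 * ib_mad_kernel_rmpp_agent() returns true only for the first case, which
 * is what gates the ib_send_rmpp_mad() path in ib_post_send_mad() below.
 */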

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
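
/*
 * Illustrative sketch (not part of the original file): allocating a
 * non-RMPP send buffer with the generic header/data split.  The agent,
 * remote_qpn, and pkey_index values are assumed to come from the caller.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	((struct ib_mad_hdr *)msg->mad)->method = IB_MGMT_METHOD_GET;
 */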

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
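
/*
 * Worked example (illustrative): IB_MGMT_CLASS_SUBN_ADM is an RMPP class,
 * so ib_is_mad_class_rmpp() returns 1 for it and its payload begins
 * IB_MGMT_SA_HDR bytes into the MAD; IB_MGMT_CLASS_SUBN_LID_ROUTED is
 * not, so ib_get_mad_data_offset() falls through to IB_MGMT_MAD_HDR.
 */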

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
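
/*
 * Illustrative sketch (not part of the original file): posting a buffer
 * built by ib_create_send_mad() above.  The address handle and the
 * timeout/retry values are assumptions for the example.
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret) {
 *		ib_free_send_mad(msg);
 *		return ret;
 *	}
 */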

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
1363
1364static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1365 struct ib_mad_agent_private *agent_priv,
1366 u8 mgmt_class)
1367{
1368 struct ib_mad_port_private *port_priv;
1369 struct ib_mad_mgmt_class_table **class;
1370 struct ib_mad_mgmt_method_table **method;
1371 int i, ret;
1372
1373 port_priv = agent_priv->qp_info->port_priv;
1374 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1375 if (!*class) {
1376 /* Allocate management class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001377 *class = kzalloc(sizeof **class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 if (!*class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001379 dev_err(&agent_priv->agent.device->dev,
1380 "No memory for ib_mad_mgmt_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 ret = -ENOMEM;
1382 goto error1;
1383 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001384
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 /* Allocate method table for this management class */
1386 method = &(*class)->method_table[mgmt_class];
1387 if ((ret = allocate_method_table(method)))
1388 goto error2;
1389 } else {
1390 method = &(*class)->method_table[mgmt_class];
1391 if (!*method) {
1392 /* Allocate method table for this management class */
1393 if ((ret = allocate_method_table(method)))
1394 goto error1;
1395 }
1396 }
1397
1398 /* Now, make sure methods are not already in use */
1399 if (method_in_use(method, mad_reg_req))
1400 goto error3;
1401
1402 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001403 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001405
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 return 0;
1407
1408error3:
1409 /* Remove any methods for this mad agent */
1410 remove_methods_mad_agent(*method, agent_priv);
1411 /* Now, check to see if there are any methods in use */
1412 if (!check_method_table(*method)) {
1413 /* If not, release management method table */
1414 kfree(*method);
1415 *method = NULL;
1416 }
1417 ret = -EINVAL;
1418 goto error1;
1419error2:
1420 kfree(*class);
1421 *class = NULL;
1422error1:
1423 return ret;
1424}
1425
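/*
 * Illustrative sketch (not part of the original file): the tables built
 * above nest per-version, per-class, per-method, so resolving the agent
 * for an incoming request is three array lookups (mgmt_class assumed
 * already converted by convert_mgmt_class()).  Hypothetical helper:
 */
static struct ib_mad_agent_private *
example_resolve_agent(struct ib_mad_port_private *port_priv,
		      u8 class_version, u8 mgmt_class, u8 method)
{
	struct ib_mad_mgmt_class_table *class =
		port_priv->version[class_version].class;

	if (!class || !class->method_table[mgmt_class])
		return NULL;
	return class->method_table[mgmt_class]->agent[method];
}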
1426static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1427 struct ib_mad_agent_private *agent_priv)
1428{
1429 struct ib_mad_port_private *port_priv;
1430 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1431 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1432 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1433 struct ib_mad_mgmt_method_table **method;
1434 int i, ret = -ENOMEM;
1435 u8 vclass;
1436
1437 /* "New" vendor (with OUI) class */
1438 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1439 port_priv = agent_priv->qp_info->port_priv;
1440 vendor_table = &port_priv->version[
1441 mad_reg_req->mgmt_class_version].vendor;
1442 if (!*vendor_table) {
1443 /* Allocate mgmt vendor class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001444 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 if (!vendor) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001446 dev_err(&agent_priv->agent.device->dev,
1447 "No memory for ib_mad_mgmt_vendor_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 goto error1;
1449 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 *vendor_table = vendor;
1452 }
1453 if (!(*vendor_table)->vendor_class[vclass]) {
1454 /* Allocate table for this management vendor class */
Roland Dreierde6eb662005-11-02 07:23:14 -08001455 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 if (!vendor_class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001457 dev_err(&agent_priv->agent.device->dev,
1458 "No memory for ib_mad_mgmt_vendor_class\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 goto error2;
1460 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001461
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 (*vendor_table)->vendor_class[vclass] = vendor_class;
1463 }
1464 for (i = 0; i < MAX_MGMT_OUI; i++) {
1465 /* Is there a matching OUI for this vendor class? */
1466 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1467 mad_reg_req->oui, 3)) {
1468 method = &(*vendor_table)->vendor_class[
1469 vclass]->method_table[i];
1470 BUG_ON(!*method);
1471 goto check_in_use;
1472 }
1473 }
1474 for (i = 0; i < MAX_MGMT_OUI; i++) {
1475 /* Is an OUI slot available? */
1476 if (!is_vendor_oui((*vendor_table)->vendor_class[
1477 vclass]->oui[i])) {
1478 method = &(*vendor_table)->vendor_class[
1479 vclass]->method_table[i];
1480 BUG_ON(*method);
1481 /* Allocate method table for this OUI */
1482 if ((ret = allocate_method_table(method)))
1483 goto error3;
1484 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1485 mad_reg_req->oui, 3);
1486 goto check_in_use;
1487 }
1488 }
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001489 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 goto error3;
1491
1492check_in_use:
1493 /* Now, make sure methods are not already in use */
1494 if (method_in_use(method, mad_reg_req))
1495 goto error4;
1496
1497 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001498 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001500
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 return 0;
1502
1503error4:
1504 /* Remove any methods for this mad agent */
1505 remove_methods_mad_agent(*method, agent_priv);
1506 /* Now, check to see if there are any methods in use */
1507 if (!check_method_table(*method)) {
1508 /* If not, release management method table */
1509 kfree(*method);
1510 *method = NULL;
1511 }
1512 ret = -EINVAL;
1513error3:
1514 if (vendor_class) {
1515 (*vendor_table)->vendor_class[vclass] = NULL;
1516 kfree(vendor_class);
1517 }
1518error2:
1519 if (vendor) {
1520 *vendor_table = NULL;
1521 kfree(vendor);
1522 }
1523error1:
1524 return ret;
1525}
1526
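/*
 * Illustrative sketch (not part of the original file): registering a
 * "new" vendor class means picking a class in vendor range 2
 * (0x30-0x4f) and supplying the vendor's OUI; the code above then finds
 * or allocates the per-OUI method table.  All values below are
 * examples only.
 */
static void example_fill_vendor_reg(struct ib_mad_reg_req *reg_req)
{
	memset(reg_req, 0, sizeof(*reg_req));
	reg_req->mgmt_class = 0x39;		/* within vendor range 2 */
	reg_req->mgmt_class_version = 1;
	reg_req->oui[0] = 0x00;			/* example OUI 00:02:c9 */
	reg_req->oui[1] = 0x02;
	reg_req->oui[2] = 0xc9;
	set_bit(IB_MGMT_METHOD_GET, reg_req->method_mask);
}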
1527static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1528{
1529 struct ib_mad_port_private *port_priv;
1530 struct ib_mad_mgmt_class_table *class;
1531 struct ib_mad_mgmt_method_table *method;
1532 struct ib_mad_mgmt_vendor_class_table *vendor;
1533 struct ib_mad_mgmt_vendor_class *vendor_class;
1534 int index;
1535 u8 mgmt_class;
1536
1537 /*
1538 * Was a MAD registration request supplied
1539 * with the original registration?
1540 */
1541 if (!agent_priv->reg_req) {
1542 goto out;
1543 }
1544
1545 port_priv = agent_priv->qp_info->port_priv;
1546 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1547 class = port_priv->version[
1548 agent_priv->reg_req->mgmt_class_version].class;
1549 if (!class)
1550 goto vendor_check;
1551
1552 method = class->method_table[mgmt_class];
1553 if (method) {
1554 /* Remove any methods for this mad agent */
1555 remove_methods_mad_agent(method, agent_priv);
1556 /* Now, check to see if there are any methods still in use */
1557 if (!check_method_table(method)) {
1558 /* If not, release management method table */
1559 kfree(method);
1560 class->method_table[mgmt_class] = NULL;
1561 /* Any management classes left? */
1562 if (!check_class_table(class)) {
1563 /* If not, release management class table */
1564 kfree(class);
1565 port_priv->version[
1566 agent_priv->reg_req->
1567 mgmt_class_version].class = NULL;
1568 }
1569 }
1570 }
1571
1572vendor_check:
1573 if (!is_vendor_class(mgmt_class))
1574 goto out;
1575
1576 /* normalize mgmt_class to vendor range 2 */
1577 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1578 vendor = port_priv->version[
1579 agent_priv->reg_req->mgmt_class_version].vendor;
1580
1581 if (!vendor)
1582 goto out;
1583
1584 vendor_class = vendor->vendor_class[mgmt_class];
1585 if (vendor_class) {
1586 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1587 if (index < 0)
1588 goto out;
1589 method = vendor_class->method_table[index];
1590 if (method) {
1591 /* Remove any methods for this mad agent */
1592 remove_methods_mad_agent(method, agent_priv);
1593 /*
1594 * Now, check to see if there are
1595 * any methods still in use
1596 */
1597 if (!check_method_table(method)) {
1598 /* If not, release management method table */
1599 kfree(method);
1600 vendor_class->method_table[index] = NULL;
1601 memset(vendor_class->oui[index], 0, 3);
1602 /* Any OUIs left? */
1603 if (!check_vendor_class(vendor_class)) {
1604 /* If not, release vendor class table */
1605 kfree(vendor_class);
1606 vendor->vendor_class[mgmt_class] = NULL;
1607 /* Any other vendor classes left? */
1608 if (!check_vendor_table(vendor)) {
1609 kfree(vendor);
1610 port_priv->version[
1611 agent_priv->reg_req->
1612 mgmt_class_version].
1613 vendor = NULL;
1614 }
1615 }
1616 }
1617 }
1618 }
1619
1620out:
1621 return;
1622}
1623
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624static struct ib_mad_agent_private *
1625find_mad_agent(struct ib_mad_port_private *port_priv,
Ira Weinyd94bd262015-06-06 14:38:22 -04001626 const struct ib_mad_hdr *mad_hdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627{
1628 struct ib_mad_agent_private *mad_agent = NULL;
1629 unsigned long flags;
1630
1631 spin_lock_irqsave(&port_priv->reg_lock, flags);
Ira Weinyd94bd262015-06-06 14:38:22 -04001632 if (ib_response_mad(mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 u32 hi_tid;
1634 struct ib_mad_agent_private *entry;
1635
1636 /*
1637 * Routing is based on high 32 bits of transaction ID
1638 * of MAD.
1639 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001640 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
Sean Hefty34816ad2005-10-25 10:51:39 -07001641 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 if (entry->agent.hi_tid == hi_tid) {
1643 mad_agent = entry;
1644 break;
1645 }
1646 }
1647 } else {
1648 struct ib_mad_mgmt_class_table *class;
1649 struct ib_mad_mgmt_method_table *method;
1650 struct ib_mad_mgmt_vendor_class_table *vendor;
1651 struct ib_mad_mgmt_vendor_class *vendor_class;
Ira Weinyd94bd262015-06-06 14:38:22 -04001652 const struct ib_vendor_mad *vendor_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 int index;
1654
1655 /*
1656 * Routing is based on version, class, and method
1657 * For "newer" vendor MADs, also based on OUI
1658 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001659 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001661 if (!is_vendor_class(mad_hdr->mgmt_class)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 class = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001663 mad_hdr->class_version].class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 if (!class)
1665 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001666 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
Hefty, Seanb7ab0b12011-10-06 09:33:05 -07001667 IB_MGMT_MAX_METHODS)
1668 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 method = class->method_table[convert_mgmt_class(
Ira Weinyd94bd262015-06-06 14:38:22 -04001670 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 if (method)
Ira Weinyd94bd262015-06-06 14:38:22 -04001672 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 ~IB_MGMT_METHOD_RESP];
1674 } else {
1675 vendor = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001676 mad_hdr->class_version].vendor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 if (!vendor)
1678 goto out;
1679 vendor_class = vendor->vendor_class[vendor_class_index(
Ira Weinyd94bd262015-06-06 14:38:22 -04001680 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 if (!vendor_class)
1682 goto out;
1683 /* Find matching OUI */
Ira Weinyd94bd262015-06-06 14:38:22 -04001684 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1686 if (index == -1)
1687 goto out;
1688 method = vendor_class->method_table[index];
1689 if (method) {
Ira Weinyd94bd262015-06-06 14:38:22 -04001690 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 ~IB_MGMT_METHOD_RESP];
1692 }
1693 }
1694 }
1695
1696 if (mad_agent) {
1697 if (mad_agent->agent.recv_handler)
1698 atomic_inc(&mad_agent->refcount);
1699 else {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001700 dev_notice(&port_priv->device->dev,
1701 "No receive handler for client %p on port %d\n",
1702 &mad_agent->agent, port_priv->port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 mad_agent = NULL;
1704 }
1705 }
1706out:
1707 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1708
1709 return mad_agent;
1710}
1711
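/*
 * Illustrative sketch (not part of the original file): response routing
 * above keys on the high 32 bits of the transaction ID, which carry the
 * sending agent's hi_tid; clients conventionally build a TID as
 * (hi_tid << 32 | local id), as this hypothetical helper shows.
 */
static __be64 example_build_tid(const struct ib_mad_agent *agent, u32 local_id)
{
	return cpu_to_be64(((u64)agent->hi_tid << 32) | local_id);
}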
Ira Weiny77f60832015-05-08 14:27:21 -04001712static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
1714 int valid = 0;
1715
1716 /* Make sure MAD base version is understood */
Ira Weiny77f60832015-05-08 14:27:21 -04001717 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001718 pr_err("MAD received with unsupported base version %d\n",
Ira Weiny77f60832015-05-08 14:27:21 -04001719 mad_hdr->base_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 goto out;
1721 }
1722
1723 /* Filter SMI packets sent to other than QP0 */
Ira Weiny77f60832015-05-08 14:27:21 -04001724 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1725 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 if (qp_num == 0)
1727 valid = 1;
1728 } else {
1729 /* Filter GSI packets sent to QP0 */
1730 if (qp_num != 0)
1731 valid = 1;
1732 }
1733
1734out:
1735 return valid;
1736}
1737
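/*
 * Illustrative sketch (not part of the original file): the filter above
 * boils down to one pairing rule - SMI classes (SUBN_LID_ROUTED,
 * SUBN_DIRECTED_ROUTE) belong on QP0, every other class on QP1.
 * Hypothetical helper:
 */
static bool example_class_allowed_on_qp(u8 mgmt_class, u32 qp_num)
{
	bool smi = mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
		   mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;

	return smi ? qp_num == 0 : qp_num != 0;
}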
Ira Weinyf766c582015-05-08 14:27:24 -04001738static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1739 const struct ib_mad_hdr *mad_hdr)
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001740{
1741 struct ib_rmpp_mad *rmpp_mad;
1742
1743 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1744 return !mad_agent_priv->agent.rmpp_version ||
Ira Weiny1471cb62014-08-08 19:00:56 -04001745 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001746 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1747 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1748 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1749}
1750
Ira Weiny8bf4b302015-05-08 14:27:23 -04001751static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1752 const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001753{
Ira Weiny8bf4b302015-05-08 14:27:23 -04001754 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001755 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1756}
1757
Ira Weinyf766c582015-05-08 14:27:24 -04001758static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1759 const struct ib_mad_send_wr_private *wr,
1760 const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001761{
1762 struct ib_ah_attr attr;
1763 u8 send_resp, rcv_resp;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001764 union ib_gid sgid;
1765 struct ib_device *device = mad_agent_priv->agent.device;
1766 u8 port_num = mad_agent_priv->agent.port_num;
1767 u8 lmc;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001768
Ira Weiny96909302015-05-08 14:27:22 -04001769 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1770 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001771
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001772 if (send_resp == rcv_resp)
1773 /* both requests or both responses: report GIDs as different */
1774 return 0;
1775
1776 if (ib_query_ah(wr->send_buf.ah, &attr))
1777 /* Assume not equal, to avoid false positives. */
1778 return 0;
1779
Jack Morgenstein9874e742006-06-17 20:37:34 -07001780 if (!!(attr.ah_flags & IB_AH_GRH) !=
1781 !!(rwc->wc->wc_flags & IB_WC_GRH))
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001782 /* one has GID, other does not. Assume different */
1783 return 0;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001784
1785 if (!send_resp && rcv_resp) {
1786 /* this is a request/response pair */
1787 if (!(attr.ah_flags & IB_AH_GRH)) {
1788 if (ib_get_cached_lmc(device, port_num, &lmc))
1789 return 0;
1790 return (!lmc || !((attr.src_path_bits ^
1791 rwc->wc->dlid_path_bits) &
1792 ((1 << lmc) - 1)));
1793 } else {
1794 if (ib_get_cached_gid(device, port_num,
1795 attr.grh.sgid_index, &sgid))
1796 return 0;
1797 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1798 16);
1799 }
1800 }
1801
1802 if (!(attr.ah_flags & IB_AH_GRH))
1803 return attr.dlid == rwc->wc->slid;
1804 else
1805 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1806 16);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001807}
Jack Morgenstein9874e742006-06-17 20:37:34 -07001808
1809static inline int is_direct(u8 class)
1810{
1811 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1812}
1813
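/*
 * Illustrative sketch (not part of the original file): the LMC test in
 * rcv_has_same_gid() masks off the low "path bit" portion of the LID.
 * With lmc == 2 a port answers to four LIDs that differ only in the low
 * two bits, so those bits are excluded from the comparison.
 */
static bool example_same_port_under_lmc(u8 src_path_bits, u8 dlid_path_bits,
					u8 lmc)
{
	return !lmc || !((src_path_bits ^ dlid_path_bits) & ((1 << lmc) - 1));
}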
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001814struct ib_mad_send_wr_private*
Ira Weinyf766c582015-05-08 14:27:24 -04001815ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1816 const struct ib_mad_recv_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817{
Jack Morgenstein9874e742006-06-17 20:37:34 -07001818 struct ib_mad_send_wr_private *wr;
Ira Weiny83a1d222015-06-06 14:38:23 -04001819 const struct ib_mad_hdr *mad_hdr;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001820
Ira Weiny83a1d222015-06-06 14:38:23 -04001821 mad_hdr = &wc->recv_buf.mad->mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
Jack Morgenstein9874e742006-06-17 20:37:34 -07001823 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
Ira Weiny83a1d222015-06-06 14:38:23 -04001824 if ((wr->tid == mad_hdr->tid) &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001825 rcv_has_same_class(wr, wc) &&
1826 /*
1827 * Don't check GID for direct routed MADs.
1828 * These might have permissive LIDs.
1829 */
Ira Weiny83a1d222015-06-06 14:38:23 -04001830 (is_direct(mad_hdr->mgmt_class) ||
Jack Morgenstein9874e742006-06-17 20:37:34 -07001831 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Roland Dreier39798692006-11-13 09:38:07 -08001832 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 }
1834
1835 /*
1836 * It's possible to receive the response before we've
1837 * been notified that the send has completed
1838 */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001839 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04001840 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
Ira Weiny83a1d222015-06-06 14:38:23 -04001841 wr->tid == mad_hdr->tid &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001842 wr->timeout &&
1843 rcv_has_same_class(wr, wc) &&
1844 /*
1845 * Don't check GID for direct routed MADs.
1846 * These might have permissive LIDs.
1847 */
Ira Weiny83a1d222015-06-06 14:38:23 -04001848 (is_direct(mad_hdr->mgmt_class) ||
Jack Morgenstein9874e742006-06-17 20:37:34 -07001849 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 /* Verify request has not been canceled */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001851 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 }
1853 return NULL;
1854}
1855
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001856void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001857{
1858 mad_send_wr->timeout = 0;
Akinobu Mita179e0912006-06-26 00:24:41 -07001859 if (mad_send_wr->refcount == 1)
1860 list_move_tail(&mad_send_wr->agent_list,
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001861 &mad_send_wr->mad_agent_priv->done_list);
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001862}
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001865 struct ib_mad_recv_wc *mad_recv_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866{
1867 struct ib_mad_send_wr_private *mad_send_wr;
1868 struct ib_mad_send_wc mad_send_wc;
1869 unsigned long flags;
1870
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001871 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1872 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
Ira Weiny1471cb62014-08-08 19:00:56 -04001873 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001874 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1875 mad_recv_wc);
1876 if (!mad_recv_wc) {
Sean Hefty1b52fa982006-05-12 14:57:52 -07001877 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001878 return;
1879 }
1880 }
1881
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 /* Complete corresponding request */
Ira Weiny96909302015-05-08 14:27:22 -04001883 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001885 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 if (!mad_send_wr) {
1887 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04001888 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1889 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1890 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1891 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1892 /* user RMPP is in effect
1893 * and this is an active RMPP MAD
1894 */
1895 mad_recv_wc->wc->wr_id = 0;
1896 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1897 mad_recv_wc);
1898 atomic_dec(&mad_agent_priv->refcount);
1899 } else {
1900 /* not user RMPP; revert to normal behavior and
1901 * drop the MAD */
1902 ib_free_recv_mad(mad_recv_wc);
1903 deref_mad_agent(mad_agent_priv);
1904 return;
1905 }
1906 } else {
1907 ib_mark_mad_done(mad_send_wr);
1908 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1909
1910 /* Defined behavior is to complete response before request */
1911 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1912 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1913 mad_recv_wc);
1914 atomic_dec(&mad_agent_priv->refcount);
1915
1916 mad_send_wc.status = IB_WC_SUCCESS;
1917 mad_send_wc.vendor_err = 0;
1918 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1919 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 } else {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001922 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1923 mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -07001924 deref_mad_agent(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 }
1926}
1927
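/*
 * Illustrative sketch (not part of the original file): a minimal client
 * recv_handler matching the contract used above - the handler owns the
 * receive buffer once called and must release it with
 * ib_free_recv_mad() when done.  Handler name is hypothetical.
 */
static void example_recv_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* ... examine mad_recv_wc->recv_buf.mad here ... */
	ib_free_recv_mad(mad_recv_wc);
}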
Ira Weinye11ae8a2015-06-06 14:38:24 -04001928static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
1929 const struct ib_mad_qp_info *qp_info,
1930 const struct ib_wc *wc,
1931 int port_num,
1932 struct ib_mad_private *recv,
1933 struct ib_mad_private *response)
1934{
1935 enum smi_forward_action retsmi;
1936
1937 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1938 port_priv->device->node_type,
1939 port_num,
1940 port_priv->device->phys_port_cnt) ==
1941 IB_SMI_DISCARD)
1942 return IB_SMI_DISCARD;
1943
1944 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1945 if (retsmi == IB_SMI_LOCAL)
1946 return IB_SMI_HANDLE;
1947
1948 if (retsmi == IB_SMI_SEND) { /* don't forward */
1949 if (smi_handle_dr_smp_send(&recv->mad.smp,
1950 port_priv->device->node_type,
1951 port_num) == IB_SMI_DISCARD)
1952 return IB_SMI_DISCARD;
1953
1954 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1955 return IB_SMI_DISCARD;
1956 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1957 /* forward case for switches */
1958 memcpy(response, recv, sizeof(*response));
1959 response->header.recv_wc.wc = &response->header.wc;
1960 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1961 response->header.recv_wc.recv_buf.grh = &response->grh;
1962
1963 agent_send_response(&response->mad.mad,
1964 &response->grh, wc,
1965 port_priv->device,
1966 smi_get_fwd_port(&recv->mad.smp),
1967 qp_info->qp->qp_num);
1968
1969 return IB_SMI_DISCARD;
1970 }
1971 return IB_SMI_HANDLE;
1972}
1973
Swapna Thete0b307042012-02-25 17:47:32 -08001974static bool generate_unmatched_resp(struct ib_mad_private *recv,
1975 struct ib_mad_private *response)
1976{
1977 if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1978 recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1979 memcpy(response, recv, sizeof *response);
1980 response->header.recv_wc.wc = &response->header.wc;
1981 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1982 response->header.recv_wc.recv_buf.grh = &response->grh;
1983 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1984 response->mad.mad.mad_hdr.status =
1985 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
Jack Morgenstein840777d2012-04-24 16:06:50 -07001986 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1987 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
Swapna Thete0b307042012-02-25 17:47:32 -08001988
1989 return true;
1990 } else {
1991 return false;
1992 }
1993}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1995 struct ib_wc *wc)
1996{
1997 struct ib_mad_qp_info *qp_info;
1998 struct ib_mad_private_header *mad_priv_hdr;
Hal Rosenstock445d6802007-08-03 10:45:17 -07001999 struct ib_mad_private *recv, *response = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 struct ib_mad_list_head *mad_list;
2001 struct ib_mad_agent_private *mad_agent;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002002 int port_num;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002003 int ret = IB_MAD_RESULT_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2006 qp_info = mad_list->mad_queue->qp_info;
2007 dequeue_mad(mad_list);
2008
2009 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2010 mad_list);
2011 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
Ralph Campbell15271062006-12-12 14:28:30 -08002012 ib_dma_unmap_single(port_priv->device,
2013 recv->header.mapping,
2014 sizeof(struct ib_mad_private) -
2015 sizeof(struct ib_mad_private_header),
2016 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 /* Setup MAD receive work completion from "normal" work completion */
Sean Hefty24239af2005-04-16 15:26:08 -07002019 recv->header.wc = *wc;
2020 recv->header.recv_wc.wc = &recv->header.wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2022 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
2023 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2024
2025 if (atomic_read(&qp_info->snoop_count))
2026 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2027
2028 /* Validate MAD */
Ira Weiny77f60832015-05-08 14:27:21 -04002029 if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 goto out;
2031
Hal Rosenstock445d6802007-08-03 10:45:17 -07002032 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2033 if (!response) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002034 dev_err(&port_priv->device->dev,
2035 "ib_mad_recv_done_handler no memory for response buffer\n");
Hal Rosenstock445d6802007-08-03 10:45:17 -07002036 goto out;
2037 }
2038
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002039 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
2040 port_num = wc->port_num;
2041 else
2042 port_num = port_priv->port_num;
2043
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 if (recv->mad.mad.mad_hdr.mgmt_class ==
2045 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
Ira Weinye11ae8a2015-06-06 14:38:24 -04002046 if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
2047 response)
2048 == IB_SMI_DISCARD)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 }
2051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 /* Give driver "right of first refusal" on incoming MAD */
2053 if (port_priv->device->process_mad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 ret = port_priv->device->process_mad(port_priv->device, 0,
2055 port_priv->port_num,
2056 wc, &recv->grh,
2057 &recv->mad.mad,
2058 &response->mad.mad);
2059 if (ret & IB_MAD_RESULT_SUCCESS) {
2060 if (ret & IB_MAD_RESULT_CONSUMED)
2061 goto out;
2062 if (ret & IB_MAD_RESULT_REPLY) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002063 agent_send_response(&response->mad.mad,
2064 &recv->grh, wc,
2065 port_priv->device,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002066 port_num,
Sean Hefty34816ad2005-10-25 10:51:39 -07002067 qp_info->qp->qp_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 goto out;
2069 }
2070 }
2071 }
2072
Ira Weinyd94bd262015-06-06 14:38:22 -04002073 mad_agent = find_mad_agent(port_priv, &recv->mad.mad.mad_hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 if (mad_agent) {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002075 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 /*
2077 * recv is freed up in error cases in ib_mad_complete_recv
2078 * or via recv_handler in ib_mad_complete_recv()
2079 */
2080 recv = NULL;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002081 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2082 generate_unmatched_resp(recv, response)) {
Swapna Thete0b307042012-02-25 17:47:32 -08002083 agent_send_response(&response->mad.mad, &recv->grh, wc,
2084 port_priv->device, port_num, qp_info->qp->qp_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 }
2086
2087out:
2088 /* Post another receive request for this QP */
2089 if (response) {
2090 ib_mad_post_receive_mads(qp_info, response);
2091 if (recv)
2092 kmem_cache_free(ib_mad_cache, recv);
2093 } else
2094 ib_mad_post_receive_mads(qp_info, recv);
2095}
2096
2097static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2098{
2099 struct ib_mad_send_wr_private *mad_send_wr;
2100 unsigned long delay;
2101
2102 if (list_empty(&mad_agent_priv->wait_list)) {
Tejun Heo136b5722012-08-21 13:18:24 -07002103 cancel_delayed_work(&mad_agent_priv->timed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 } else {
2105 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2106 struct ib_mad_send_wr_private,
2107 agent_list);
2108
2109 if (time_after(mad_agent_priv->timeout,
2110 mad_send_wr->timeout)) {
2111 mad_agent_priv->timeout = mad_send_wr->timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 delay = mad_send_wr->timeout - jiffies;
2113 if ((long)delay <= 0)
2114 delay = 1;
Tejun Heoe7c2f962012-08-21 13:18:24 -07002115 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2116 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 }
2118 }
2119}
2120
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002121static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122{
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002123 struct ib_mad_agent_private *mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 struct ib_mad_send_wr_private *temp_mad_send_wr;
2125 struct list_head *list_item;
2126 unsigned long delay;
2127
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002128 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 list_del(&mad_send_wr->agent_list);
2130
2131 delay = mad_send_wr->timeout;
2132 mad_send_wr->timeout += jiffies;
2133
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002134 if (delay) {
2135 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2136 temp_mad_send_wr = list_entry(list_item,
2137 struct ib_mad_send_wr_private,
2138 agent_list);
2139 if (time_after(mad_send_wr->timeout,
2140 temp_mad_send_wr->timeout))
2141 break;
2142 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 }
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002144 else
2145 list_item = &mad_agent_priv->wait_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 list_add(&mad_send_wr->agent_list, list_item);
2147
2148 /* Reschedule a work item if we have a shorter timeout */
Tejun Heoe7c2f962012-08-21 13:18:24 -07002149 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2150 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2151 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152}
2153
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002154void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2155 int timeout_ms)
2156{
2157 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2158 wait_for_response(mad_send_wr);
2159}
2160
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161/*
2162 * Process a send work completion
2163 */
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002164void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2165 struct ib_mad_send_wc *mad_send_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166{
2167 struct ib_mad_agent_private *mad_agent_priv;
2168 unsigned long flags;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002169 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002171 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04002173 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002174 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2175 if (ret == IB_RMPP_RESULT_CONSUMED)
2176 goto done;
2177 } else
2178 ret = IB_RMPP_RESULT_UNHANDLED;
2179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 if (mad_send_wc->status != IB_WC_SUCCESS &&
2181 mad_send_wr->status == IB_WC_SUCCESS) {
2182 mad_send_wr->status = mad_send_wc->status;
2183 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2184 }
2185
2186 if (--mad_send_wr->refcount > 0) {
2187 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2188 mad_send_wr->status == IB_WC_SUCCESS) {
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002189 wait_for_response(mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002191 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 }
2193
2194 /* Remove send from MAD agent and notify client of completion */
2195 list_del(&mad_send_wr->agent_list);
2196 adjust_timeout(mad_agent_priv);
2197 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2198
2199 if (mad_send_wr->status != IB_WC_SUCCESS)
2200 mad_send_wc->status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002201 if (ret == IB_RMPP_RESULT_INTERNAL)
2202 ib_rmpp_send_handler(mad_send_wc);
2203 else
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002204 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2205 mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
2207 /* Release reference on agent taken when sending */
Sean Hefty1b52fa982006-05-12 14:57:52 -07002208 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002209 return;
2210done:
2211 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212}
2213
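/*
 * Illustrative sketch (not part of the original file): a minimal client
 * send_handler matching the completion path above; the client owns the
 * send buffer again once this runs and typically frees it with
 * ib_free_send_mad().  Handler name is hypothetical.
 */
static void example_send_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}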
2214static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2215 struct ib_wc *wc)
2216{
2217 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2218 struct ib_mad_list_head *mad_list;
2219 struct ib_mad_qp_info *qp_info;
2220 struct ib_mad_queue *send_queue;
2221 struct ib_send_wr *bad_send_wr;
Sean Hefty34816ad2005-10-25 10:51:39 -07002222 struct ib_mad_send_wc mad_send_wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 unsigned long flags;
2224 int ret;
2225
2226 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2227 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2228 mad_list);
2229 send_queue = mad_list->mad_queue;
2230 qp_info = send_queue->qp_info;
2231
2232retry:
Ralph Campbell15271062006-12-12 14:28:30 -08002233 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2234 mad_send_wr->header_mapping,
2235 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2236 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2237 mad_send_wr->payload_mapping,
2238 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 queued_send_wr = NULL;
2240 spin_lock_irqsave(&send_queue->lock, flags);
2241 list_del(&mad_list->list);
2242
2243 /* Move queued send to the send queue */
2244 if (send_queue->count-- > send_queue->max_active) {
2245 mad_list = container_of(qp_info->overflow_list.next,
2246 struct ib_mad_list_head, list);
2247 queued_send_wr = container_of(mad_list,
2248 struct ib_mad_send_wr_private,
2249 mad_list);
Akinobu Mita179e0912006-06-26 00:24:41 -07002250 list_move_tail(&mad_list->list, &send_queue->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 }
2252 spin_unlock_irqrestore(&send_queue->lock, flags);
2253
Sean Hefty34816ad2005-10-25 10:51:39 -07002254 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2255 mad_send_wc.status = wc->status;
2256 mad_send_wc.vendor_err = wc->vendor_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 if (atomic_read(&qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002258 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 IB_MAD_SNOOP_SEND_COMPLETIONS);
Sean Hefty34816ad2005-10-25 10:51:39 -07002260 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
2262 if (queued_send_wr) {
2263 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
Sean Hefty34816ad2005-10-25 10:51:39 -07002264 &bad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002266 dev_err(&port_priv->device->dev,
2267 "ib_post_send failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 mad_send_wr = queued_send_wr;
2269 wc->status = IB_WC_LOC_QP_OP_ERR;
2270 goto retry;
2271 }
2272 }
2273}
2274
2275static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2276{
2277 struct ib_mad_send_wr_private *mad_send_wr;
2278 struct ib_mad_list_head *mad_list;
2279 unsigned long flags;
2280
2281 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2282 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2283 mad_send_wr = container_of(mad_list,
2284 struct ib_mad_send_wr_private,
2285 mad_list);
2286 mad_send_wr->retry = 1;
2287 }
2288 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2289}
2290
2291static void mad_error_handler(struct ib_mad_port_private *port_priv,
2292 struct ib_wc *wc)
2293{
2294 struct ib_mad_list_head *mad_list;
2295 struct ib_mad_qp_info *qp_info;
2296 struct ib_mad_send_wr_private *mad_send_wr;
2297 int ret;
2298
2299 /* Determine if failure was a send or receive */
2300 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2301 qp_info = mad_list->mad_queue->qp_info;
2302 if (mad_list->mad_queue == &qp_info->recv_queue)
2303 /*
2304 * Receive errors indicate that the QP has entered the error
2305 * state - error handling/shutdown code will cleanup
2306 */
2307 return;
2308
2309 /*
2310 * Send errors will transition the QP to SQE - move
2311 * QP to RTS and repost flushed work requests
2312 */
2313 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2314 mad_list);
2315 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2316 if (mad_send_wr->retry) {
2317 /* Repost send */
2318 struct ib_send_wr *bad_send_wr;
2319
2320 mad_send_wr->retry = 0;
2321 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2322 &bad_send_wr);
2323 if (ret)
2324 ib_mad_send_done_handler(port_priv, wc);
2325 } else
2326 ib_mad_send_done_handler(port_priv, wc);
2327 } else {
2328 struct ib_qp_attr *attr;
2329
2330 /* Transition QP to RTS and fail offending send */
2331 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2332 if (attr) {
2333 attr->qp_state = IB_QPS_RTS;
2334 attr->cur_qp_state = IB_QPS_SQE;
2335 ret = ib_modify_qp(qp_info->qp, attr,
2336 IB_QP_STATE | IB_QP_CUR_STATE);
2337 kfree(attr);
2338 if (ret)
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002339 dev_err(&port_priv->device->dev,
2340 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2341 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 else
2343 mark_sends_for_retry(qp_info);
2344 }
2345 ib_mad_send_done_handler(port_priv, wc);
2346 }
2347}
2348
2349/*
2350 * IB MAD completion callback
2351 */
David Howellsc4028952006-11-22 14:57:56 +00002352static void ib_mad_completion_handler(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353{
2354 struct ib_mad_port_private *port_priv;
2355 struct ib_wc wc;
2356
David Howellsc4028952006-11-22 14:57:56 +00002357 port_priv = container_of(work, struct ib_mad_port_private, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2359
2360 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2361 if (wc.status == IB_WC_SUCCESS) {
2362 switch (wc.opcode) {
2363 case IB_WC_SEND:
2364 ib_mad_send_done_handler(port_priv, &wc);
2365 break;
2366 case IB_WC_RECV:
2367 ib_mad_recv_done_handler(port_priv, &wc);
2368 break;
2369 default:
2370 BUG_ON(1);
2371 break;
2372 }
2373 } else
2374 mad_error_handler(port_priv, &wc);
2375 }
2376}
2377
2378static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2379{
2380 unsigned long flags;
2381 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2382 struct ib_mad_send_wc mad_send_wc;
2383 struct list_head cancel_list;
2384
2385 INIT_LIST_HEAD(&cancel_list);
2386
2387 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2388 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2389 &mad_agent_priv->send_list, agent_list) {
2390 if (mad_send_wr->status == IB_WC_SUCCESS) {
Roland Dreier3cd96562006-09-22 15:22:46 -07002391 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2393 }
2394 }
2395
2396 /* Empty wait list to prevent receives from finding a request */
2397 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2398 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2399
2400 /* Report all cancelled requests */
2401 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2402 mad_send_wc.vendor_err = 0;
2403
2404 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2405 &cancel_list, agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002406 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2407 list_del(&mad_send_wr->agent_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2409 &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 atomic_dec(&mad_agent_priv->refcount);
2411 }
2412}
2413
2414static struct ib_mad_send_wr_private*
Sean Hefty34816ad2005-10-25 10:51:39 -07002415find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2416 struct ib_mad_send_buf *send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417{
2418 struct ib_mad_send_wr_private *mad_send_wr;
2419
2420 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2421 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002422 if (&mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 return mad_send_wr;
2424 }
2425
2426 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2427 agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04002428 if (is_rmpp_data_mad(mad_agent_priv,
2429 mad_send_wr->send_buf.mad) &&
Sean Hefty34816ad2005-10-25 10:51:39 -07002430 &mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 return mad_send_wr;
2432 }
2433 return NULL;
2434}
2435
Sean Hefty34816ad2005-10-25 10:51:39 -07002436int ib_modify_mad(struct ib_mad_agent *mad_agent,
2437 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438{
2439 struct ib_mad_agent_private *mad_agent_priv;
2440 struct ib_mad_send_wr_private *mad_send_wr;
2441 unsigned long flags;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002442 int active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
2444 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2445 agent);
2446 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Sean Hefty34816ad2005-10-25 10:51:39 -07002447 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002448 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002450 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 }
2452
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002453 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002454 if (!timeout_ms) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002456 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 }
2458
Sean Hefty34816ad2005-10-25 10:51:39 -07002459 mad_send_wr->send_buf.timeout_ms = timeout_ms;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002460 if (active)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002461 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2462 else
2463 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002465 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2466 return 0;
2467}
2468EXPORT_SYMBOL(ib_modify_mad);
2469
Sean Hefty34816ad2005-10-25 10:51:39 -07002470void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2471 struct ib_mad_send_buf *send_buf)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002472{
Sean Hefty34816ad2005-10-25 10:51:39 -07002473 ib_modify_mad(mad_agent, send_buf, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474}
2475EXPORT_SYMBOL(ib_cancel_mad);
2476
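/*
 * Illustrative sketch (not part of the original file): a client can
 * extend a pending request's timeout with ib_modify_mad(), and a
 * timeout of 0 cancels it - which is all ib_cancel_mad() above does.
 * Helper name and the 5 second value are examples only.
 */
static void example_extend_or_cancel(struct ib_mad_agent *agent,
				     struct ib_mad_send_buf *send_buf,
				     bool extend)
{
	if (extend)
		ib_modify_mad(agent, send_buf, 5000);	/* 5 seconds */
	else
		ib_cancel_mad(agent, send_buf);
}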
David Howellsc4028952006-11-22 14:57:56 +00002477static void local_completions(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478{
2479 struct ib_mad_agent_private *mad_agent_priv;
2480 struct ib_mad_local_private *local;
2481 struct ib_mad_agent_private *recv_mad_agent;
2482 unsigned long flags;
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002483 int free_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 struct ib_wc wc;
2485 struct ib_mad_send_wc mad_send_wc;
2486
David Howellsc4028952006-11-22 14:57:56 +00002487 mad_agent_priv =
2488 container_of(work, struct ib_mad_agent_private, local_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489
2490 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2491 while (!list_empty(&mad_agent_priv->local_list)) {
2492 local = list_entry(mad_agent_priv->local_list.next,
2493 struct ib_mad_local_private,
2494 completion_list);
Michael S. Tsirkin37289ef2006-03-30 15:52:54 +02002495 list_del(&local->completion_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002497 free_mad = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 if (local->mad_priv) {
2499 recv_mad_agent = local->recv_mad_agent;
2500 if (!recv_mad_agent) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002501 dev_err(&mad_agent_priv->agent.device->dev,
2502 "No receive MAD agent for local completion\n");
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002503 free_mad = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 goto local_send_completion;
2505 }
2506
2507 /*
2508 * Defined behavior is to complete response
2509 * before request
2510 */
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +02002511 build_smp_wc(recv_mad_agent->agent.qp,
2512 (unsigned long) local->mad_send_wr,
Sean Hefty97f52eb2005-08-13 21:05:57 -07002513 be16_to_cpu(IB_LID_PERMISSIVE),
Sean Hefty34816ad2005-10-25 10:51:39 -07002514 0, recv_mad_agent->agent.port_num, &wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
2516 local->mad_priv->header.recv_wc.wc = &wc;
2517 local->mad_priv->header.recv_wc.mad_len =
2518 sizeof(struct ib_mad);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002519 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2520 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2521 &local->mad_priv->header.recv_wc.rmpp_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2523 local->mad_priv->header.recv_wc.recv_buf.mad =
2524 &local->mad_priv->mad.mad;
2525 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2526 snoop_recv(recv_mad_agent->qp_info,
2527 &local->mad_priv->header.recv_wc,
2528 IB_MAD_SNOOP_RECVS);
2529 recv_mad_agent->agent.recv_handler(
2530 &recv_mad_agent->agent,
2531 &local->mad_priv->header.recv_wc);
2532 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2533 atomic_dec(&recv_mad_agent->refcount);
2534 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2535 }
2536
2537local_send_completion:
2538 /* Complete send */
2539 mad_send_wc.status = IB_WC_SUCCESS;
2540 mad_send_wc.vendor_err = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07002541 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002543 snoop_send(mad_agent_priv->qp_info,
2544 &local->mad_send_wr->send_buf,
2545 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2547 &mad_send_wc);
2548
2549 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 atomic_dec(&mad_agent_priv->refcount);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002551 if (free_mad)
Hal Rosenstock2c153b92005-07-27 11:45:31 -07002552 kmem_cache_free(ib_mad_cache, local->mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 kfree(local);
2554 }
2555 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2556}
2557
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002558static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2559{
2560 int ret;
2561
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002562 if (!mad_send_wr->retries_left)
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002563 return -ETIMEDOUT;
2564
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002565 mad_send_wr->retries_left--;
2566 mad_send_wr->send_buf.retries++;
2567
Sean Hefty34816ad2005-10-25 10:51:39 -07002568 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002569
Ira Weiny1471cb62014-08-08 19:00:56 -04002570 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002571 ret = ib_retry_rmpp(mad_send_wr);
2572 switch (ret) {
2573 case IB_RMPP_RESULT_UNHANDLED:
2574 ret = ib_send_mad(mad_send_wr);
2575 break;
2576 case IB_RMPP_RESULT_CONSUMED:
2577 ret = 0;
2578 break;
2579 default:
2580 ret = -ECOMM;
2581 break;
2582 }
2583 } else
2584 ret = ib_send_mad(mad_send_wr);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002585
2586 if (!ret) {
2587 mad_send_wr->refcount++;
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002588 list_add_tail(&mad_send_wr->agent_list,
2589 &mad_send_wr->mad_agent_priv->send_list);
2590 }
2591 return ret;
2592}
2593
David Howellsc4028952006-11-22 14:57:56 +00002594static void timeout_sends(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595{
2596 struct ib_mad_agent_private *mad_agent_priv;
2597 struct ib_mad_send_wr_private *mad_send_wr;
2598 struct ib_mad_send_wc mad_send_wc;
2599 unsigned long flags, delay;
2600
David Howellsc4028952006-11-22 14:57:56 +00002601 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2602 timed_work.work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 mad_send_wc.vendor_err = 0;
2604
2605 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2606 while (!list_empty(&mad_agent_priv->wait_list)) {
2607 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2608 struct ib_mad_send_wr_private,
2609 agent_list);
2610
2611 if (time_after(mad_send_wr->timeout, jiffies)) {
2612 delay = mad_send_wr->timeout - jiffies;
2613 if ((long)delay <= 0)
2614 delay = 1;
2615 queue_delayed_work(mad_agent_priv->qp_info->
2616 port_priv->wq,
2617 &mad_agent_priv->timed_work, delay);
2618 break;
2619 }
2620
Hal Rosenstockdbf92272005-07-27 11:45:30 -07002621 list_del(&mad_send_wr->agent_list);
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002622 if (mad_send_wr->status == IB_WC_SUCCESS &&
2623 !retry_send(mad_send_wr))
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002624 continue;
2625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2627
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002628 if (mad_send_wr->status == IB_WC_SUCCESS)
2629 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2630 else
2631 mad_send_wc.status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002632 mad_send_wc.send_buf = &mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2634 &mad_send_wc);
2635
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 atomic_dec(&mad_agent_priv->refcount);
2637 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2638 }
2639 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2640}
2641
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07002642static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643{
2644 struct ib_mad_port_private *port_priv = cq->cq_context;
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002645 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002647 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2648 if (!list_empty(&port_priv->port_list))
2649 queue_work(port_priv->wq, &port_priv->work);
2650 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651}
2652
2653/*
2654 * Allocate receive MADs and post receive WRs for them
2655 */
2656static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2657 struct ib_mad_private *mad)
2658{
2659 unsigned long flags;
2660 int post, ret;
2661 struct ib_mad_private *mad_priv;
2662 struct ib_sge sg_list;
2663 struct ib_recv_wr recv_wr, *bad_recv_wr;
2664 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2665
2666 /* Initialize common scatter list fields */
2667 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2668 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2669
2670 /* Initialize common receive WR fields */
2671 recv_wr.next = NULL;
2672 recv_wr.sg_list = &sg_list;
2673 recv_wr.num_sge = 1;
2674
2675 do {
2676 /* Allocate and map receive buffer */
2677 if (mad) {
2678 mad_priv = mad;
2679 mad = NULL;
2680 } else {
2681 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2682 if (!mad_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002683 dev_err(&qp_info->port_priv->device->dev,
2684 "No memory for receive buffer\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 ret = -ENOMEM;
2686 break;
2687 }
2688 }
Ralph Campbell15271062006-12-12 14:28:30 -08002689 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2690 &mad_priv->grh,
2691 sizeof *mad_priv -
2692 sizeof mad_priv->header,
2693 DMA_FROM_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02002694 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2695 sg_list.addr))) {
2696 ret = -ENOMEM;
2697 break;
2698 }
Ralph Campbell15271062006-12-12 14:28:30 -08002699 mad_priv->header.mapping = sg_list.addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2701 mad_priv->header.mad_list.mad_queue = recv_queue;
2702
2703 /* Post receive WR */
2704 spin_lock_irqsave(&recv_queue->lock, flags);
2705 post = (++recv_queue->count < recv_queue->max_active);
2706 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2707 spin_unlock_irqrestore(&recv_queue->lock, flags);
2708 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2709 if (ret) {
2710 spin_lock_irqsave(&recv_queue->lock, flags);
2711 list_del(&mad_priv->header.mad_list.list);
2712 recv_queue->count--;
2713 spin_unlock_irqrestore(&recv_queue->lock, flags);
Ralph Campbell15271062006-12-12 14:28:30 -08002714 ib_dma_unmap_single(qp_info->port_priv->device,
2715 mad_priv->header.mapping,
2716 sizeof *mad_priv -
2717 sizeof mad_priv->header,
2718 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719 kmem_cache_free(ib_mad_cache, mad_priv);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002720 dev_err(&qp_info->port_priv->device->dev,
2721 "ib_post_recv failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 break;
2723 }
2724 } while (post);
2725
2726 return ret;
2727}
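
/*
 * Minimal sketch (names hypothetical, locking and queue accounting
 * elided) of the post-one-receive step in the loop above: map the
 * buffer for DMA, describe it with a single SGE, post it, and undo the
 * mapping if the post fails.  The mapping must otherwise stay live
 * until the receive completion for wr_id is reaped.
 */
static int post_one_recv(struct ib_qp *qp, struct ib_device *dev,
			 u32 lkey, void *buf, u32 len, u64 wr_id)
{
	struct ib_sge sge;
	struct ib_recv_wr wr, *bad_wr;
	int ret;

	sge.addr = ib_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(dev, sge.addr))
		return -ENOMEM;
	sge.length = len;
	sge.lkey = lkey;

	wr.next = NULL;
	wr.wr_id = wr_id;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	ret = ib_post_recv(qp, &wr, &bad_wr);
	if (ret)
		ib_dma_unmap_single(dev, sge.addr, len, DMA_FROM_DEVICE);
	return ret;
}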
2728
2729/*
2730 * Return all the posted receive MADs
2731 */
2732static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2733{
2734 struct ib_mad_private_header *mad_priv_hdr;
2735 struct ib_mad_private *recv;
2736 struct ib_mad_list_head *mad_list;
2737
Eli Cohenfac70d52010-09-27 17:51:11 -07002738 if (!qp_info->qp)
2739 return;
2740
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 while (!list_empty(&qp_info->recv_queue.list)) {
2742
2743 mad_list = list_entry(qp_info->recv_queue.list.next,
2744 struct ib_mad_list_head, list);
2745 mad_priv_hdr = container_of(mad_list,
2746 struct ib_mad_private_header,
2747 mad_list);
2748 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2749 header);
2750
2751 /* Remove from posted receive MAD list */
2752 list_del(&mad_list->list);
2753
Ralph Campbell15271062006-12-12 14:28:30 -08002754 ib_dma_unmap_single(qp_info->port_priv->device,
2755 recv->header.mapping,
2756 sizeof(struct ib_mad_private) -
2757 sizeof(struct ib_mad_private_header),
2758 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 kmem_cache_free(ib_mad_cache, recv);
2760 }
2761
2762 qp_info->recv_queue.count = 0;
2763}
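
/*
 * Sketch of the two-hop container_of() recovery used above: each posted
 * buffer is tracked only by the small ib_mad_list_head embedded in it,
 * and the full allocation is recovered by walking back out through the
 * enclosing structures.  The helper itself is hypothetical; the struct
 * and member names are the ones used in this file.
 */
static struct ib_mad_private *to_mad_private(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_private_header *hdr;

	hdr = container_of(mad_list, struct ib_mad_private_header, mad_list);
	return container_of(hdr, struct ib_mad_private, header);
}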
2764
2765/*
2766 * Start the port
2767 */
2768static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2769{
2770 int ret, i;
2771 struct ib_qp_attr *attr;
2772 struct ib_qp *qp;
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002773 u16 pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774
2775 attr = kmalloc(sizeof *attr, GFP_KERNEL);
Roland Dreier3cd96562006-09-22 15:22:46 -07002776 if (!attr) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002777 dev_err(&port_priv->device->dev,
2778 "Couldn't kmalloc ib_qp_attr\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 return -ENOMEM;
2780 }
2781
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002782 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2783 IB_DEFAULT_PKEY_FULL, &pkey_index);
2784 if (ret)
2785 pkey_index = 0;
2786
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2788 qp = port_priv->qp_info[i].qp;
Eli Cohenfac70d52010-09-27 17:51:11 -07002789 if (!qp)
2790 continue;
2791
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 /*
2793 * PKey index for QP1 is irrelevant but
2794 * one is needed for the Reset to Init transition
2795 */
2796 attr->qp_state = IB_QPS_INIT;
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002797 attr->pkey_index = pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2799 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2800 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2801 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002802 dev_err(&port_priv->device->dev,
2803 "Couldn't change QP%d state to INIT: %d\n",
2804 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 goto out;
2806 }
2807
2808 attr->qp_state = IB_QPS_RTR;
2809 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2810 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002811 dev_err(&port_priv->device->dev,
2812 "Couldn't change QP%d state to RTR: %d\n",
2813 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 goto out;
2815 }
2816
2817 attr->qp_state = IB_QPS_RTS;
2818 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2819 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2820 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002821 dev_err(&port_priv->device->dev,
2822 "Couldn't change QP%d state to RTS: %d\n",
2823 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 goto out;
2825 }
2826 }
2827
2828 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2829 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002830 dev_err(&port_priv->device->dev,
2831 "Failed to request completion notification: %d\n",
2832 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 goto out;
2834 }
2835
2836 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
Eli Cohenfac70d52010-09-27 17:51:11 -07002837 if (!port_priv->qp_info[i].qp)
2838 continue;
2839
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2841 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002842 dev_err(&port_priv->device->dev,
2843 "Couldn't post receive WRs\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 goto out;
2845 }
2846 }
2847out:
2848 kfree(attr);
2849 return ret;
2850}
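
/*
 * Condensed sketch (helper name hypothetical, error reporting elided)
 * of the special-QP bring-up performed above: MAD QPs need only the
 * state, P_Key index, and Q_Key for the RESET->INIT transition and the
 * send PSN for the transition to RTS; no address vector is programmed
 * because QP0 and QP1 are datagram QPs.
 */
static int bring_up_mad_qp(struct ib_qp *qp, u16 pkey_index, u32 qkey)
{
	struct ib_qp_attr attr = {};
	int ret;

	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = pkey_index;
	attr.qkey = qkey;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = IB_MAD_SEND_Q_PSN;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}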
2851
2852static void qp_event_handler(struct ib_event *event, void *qp_context)
2853{
2854 struct ib_mad_qp_info *qp_info = qp_context;
2855
2856 /* It's worse than that! He's dead, Jim! */
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002857 dev_err(&qp_info->port_priv->device->dev,
2858 "Fatal error (%d) on MAD QP (%d)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 event->event, qp_info->qp->qp_num);
2860}
2861
2862static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2863 struct ib_mad_queue *mad_queue)
2864{
2865 mad_queue->qp_info = qp_info;
2866 mad_queue->count = 0;
2867 spin_lock_init(&mad_queue->lock);
2868 INIT_LIST_HEAD(&mad_queue->list);
2869}
2870
2871static void init_mad_qp(struct ib_mad_port_private *port_priv,
2872 struct ib_mad_qp_info *qp_info)
2873{
2874 qp_info->port_priv = port_priv;
2875 init_mad_queue(qp_info, &qp_info->send_queue);
2876 init_mad_queue(qp_info, &qp_info->recv_queue);
2877 INIT_LIST_HEAD(&qp_info->overflow_list);
2878 spin_lock_init(&qp_info->snoop_lock);
2879 qp_info->snoop_table = NULL;
2880 qp_info->snoop_table_size = 0;
2881 atomic_set(&qp_info->snoop_count, 0);
2882}
2883
2884static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2885 enum ib_qp_type qp_type)
2886{
2887 struct ib_qp_init_attr qp_init_attr;
2888 int ret;
2889
2890 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2891 qp_init_attr.send_cq = qp_info->port_priv->cq;
2892 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2893 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07002894 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2895 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2897 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2898 qp_init_attr.qp_type = qp_type;
2899 qp_init_attr.port_num = qp_info->port_priv->port_num;
2900 qp_init_attr.qp_context = qp_info;
2901 qp_init_attr.event_handler = qp_event_handler;
2902 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2903 if (IS_ERR(qp_info->qp)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002904 dev_err(&qp_info->port_priv->device->dev,
2905 "Couldn't create ib_mad QP%d\n",
2906 get_spl_qp_index(qp_type));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 ret = PTR_ERR(qp_info->qp);
2908 goto error;
2909 }
2910 /* Use minimum queue sizes unless the CQ is resized */
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07002911 qp_info->send_queue.max_active = mad_sendq_size;
2912 qp_info->recv_queue.max_active = mad_recvq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 return 0;
2914
2915error:
2916 return ret;
2917}
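
/*
 * Design note: both MAD QPs share the single per-port CQ created in
 * ib_mad_port_open() (send_cq == recv_cq == port_priv->cq), which is
 * why that CQ is sized there for the send and receive queues of every
 * QP on the port; one completion handler then services QP0 and QP1
 * traffic alike.
 */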
2918
2919static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2920{
Eli Cohenfac70d52010-09-27 17:51:11 -07002921 if (!qp_info->qp)
2922 return;
2923
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 ib_destroy_qp(qp_info->qp);
Jesper Juhl6044ec82005-11-07 01:01:32 -08002925 kfree(qp_info->snoop_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926}
2927
2928/*
2929 * Open the port
2930 * Create the QP, PD, MR, and CQ if needed
2931 */
2932static int ib_mad_port_open(struct ib_device *device,
2933 int port_num)
2934{
2935 int ret, cq_size;
2936 struct ib_mad_port_private *port_priv;
2937 unsigned long flags;
2938 char name[sizeof "ib_mad123"];
Eli Cohenfac70d52010-09-27 17:51:11 -07002939 int has_smi;
Matan Barak8e372102015-06-11 16:35:21 +03002940 struct ib_cq_init_attr cq_attr = {};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941
Ira Weiny337877a2015-06-06 14:38:29 -04002942 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
2943 return -EFAULT;
2944
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 /* Create new device info */
Roland Dreierde6eb662005-11-02 07:23:14 -08002946 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 if (!port_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002948 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 return -ENOMEM;
2950 }
Roland Dreierde6eb662005-11-02 07:23:14 -08002951
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 port_priv->device = device;
2953 port_priv->port_num = port_num;
2954 spin_lock_init(&port_priv->reg_lock);
2955 INIT_LIST_HEAD(&port_priv->agent_list);
2956 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2957 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2958
Eli Cohenfac70d52010-09-27 17:51:11 -07002959 cq_size = mad_sendq_size + mad_recvq_size;
Michael Wang29541e32015-05-05 14:50:33 +02002960 has_smi = rdma_cap_ib_smi(device, port_num);
Eli Cohenfac70d52010-09-27 17:51:11 -07002961 if (has_smi)
2962 cq_size *= 2;
2963
Matan Barak8e372102015-06-11 16:35:21 +03002964 cq_attr.cqe = cq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 port_priv->cq = ib_create_cq(port_priv->device,
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07002966 ib_mad_thread_completion_handler,
Matan Barak8e372102015-06-11 16:35:21 +03002967 NULL, port_priv, &cq_attr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 if (IS_ERR(port_priv->cq)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002969 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 ret = PTR_ERR(port_priv->cq);
2971 goto error3;
2972 }
2973
2974 port_priv->pd = ib_alloc_pd(device);
2975 if (IS_ERR(port_priv->pd)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002976 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977 ret = PTR_ERR(port_priv->pd);
2978 goto error4;
2979 }
2980
2981 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2982 if (IS_ERR(port_priv->mr)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002983 dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 ret = PTR_ERR(port_priv->mr);
2985 goto error5;
2986 }
2987
Eli Cohenfac70d52010-09-27 17:51:11 -07002988 if (has_smi) {
2989 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2990 if (ret)
2991 goto error6;
2992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2994 if (ret)
2995 goto error7;
2996
2997 snprintf(name, sizeof name, "ib_mad%d", port_num);
2998 port_priv->wq = create_singlethread_workqueue(name);
2999 if (!port_priv->wq) {
3000 ret = -ENOMEM;
3001 goto error8;
3002 }
David Howellsc4028952006-11-22 14:57:56 +00003003 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003005 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3006 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3007 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3008
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 ret = ib_mad_port_start(port_priv);
3010 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003011 dev_err(&device->dev, "Couldn't start port\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 goto error9;
3013 }
3014
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 return 0;
3016
3017error9:
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003018 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3019 list_del_init(&port_priv->port_list);
3020 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3021
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 destroy_workqueue(port_priv->wq);
3023error8:
3024 destroy_mad_qp(&port_priv->qp_info[1]);
3025error7:
3026 destroy_mad_qp(&port_priv->qp_info[0]);
3027error6:
3028 ib_dereg_mr(port_priv->mr);
3029error5:
3030 ib_dealloc_pd(port_priv->pd);
3031error4:
3032 ib_destroy_cq(port_priv->cq);
3033 cleanup_recv_queue(&port_priv->qp_info[1]);
3034 cleanup_recv_queue(&port_priv->qp_info[0]);
3035error3:
3036 kfree(port_priv);
3037
3038 return ret;
3039}
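
/*
 * Illustrative sketch (names hypothetical, resources truncated) of the
 * goto-unwind ladder used above: each acquired resource adds one label,
 * and a failure jumps to the label that releases everything acquired so
 * far, in reverse order of acquisition.
 */
static int example_open(struct ib_device *device)
{
	struct ib_cq_init_attr cq_attr = { .cqe = 64 };
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	cq = ib_create_cq(device, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	pd = ib_alloc_pd(device);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto error_cq;
	}

	/* ... further setup would extend the ladder here ... */

	ib_dealloc_pd(pd);
	ib_destroy_cq(cq);
	return 0;

error_cq:
	ib_destroy_cq(cq);
	return ret;
}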
3040
3041/*
3042 * Close the port
3043 * If there are no classes using the port, free the port
3044 * resources (CQ, MR, PD, QP) and remove the port's info structure
3045 */
3046static int ib_mad_port_close(struct ib_device *device, int port_num)
3047{
3048 struct ib_mad_port_private *port_priv;
3049 unsigned long flags;
3050
3051 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3052 port_priv = __ib_get_mad_port(device, port_num);
3053 if (port_priv == NULL) {
3054 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003055 dev_err(&device->dev, "Port %d not found\n", port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 return -ENODEV;
3057 }
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003058 list_del_init(&port_priv->port_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3060
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061 destroy_workqueue(port_priv->wq);
3062 destroy_mad_qp(&port_priv->qp_info[1]);
3063 destroy_mad_qp(&port_priv->qp_info[0]);
3064 ib_dereg_mr(port_priv->mr);
3065 ib_dealloc_pd(port_priv->pd);
3066 ib_destroy_cq(port_priv->cq);
3067 cleanup_recv_queue(&port_priv->qp_info[1]);
3068 cleanup_recv_queue(&port_priv->qp_info[0]);
3069 /* XXX: Handle deallocation of MAD registration tables */
3070
3071 kfree(port_priv);
3072
3073 return 0;
3074}
3075
3076static void ib_mad_init_device(struct ib_device *device)
3077{
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003078 int start, end, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079
Tom Tucker07ebafb2006-08-03 16:02:42 -05003080 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003081 start = 0;
3082 end = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 } else {
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003084 start = 1;
3085 end = device->phys_port_cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 }
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003087
3088 for (i = start; i <= end; i++) {
Michael Wangc757dea2015-05-05 14:50:32 +02003089 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003090 continue;
3091
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003092 if (ib_mad_port_open(device, i)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003093 dev_err(&device->dev, "Couldn't open port %d\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003094 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 }
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003096 if (ib_agent_port_open(device, i)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003097 dev_err(&device->dev,
3098 "Couldn't open port %d for agents\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003099 goto error_agent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 }
3101 }
Hal Rosenstockf68bcc22005-07-27 11:45:27 -07003102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003104error_agent:
3105 if (ib_mad_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003106 dev_err(&device->dev, "Couldn't close port %d\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003107
3108error:
Michael Wang827f2a82015-05-05 14:50:20 +02003109 while (--i >= start) {
Michael Wangc757dea2015-05-05 14:50:32 +02003110 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003111 continue;
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003112
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003113 if (ib_agent_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003114 dev_err(&device->dev,
3115 "Couldn't close port %d for agents\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003116 if (ib_mad_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003117 dev_err(&device->dev, "Couldn't close port %d\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119}
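
/*
 * Sketch of the per-port scan above (the callback indirection is
 * hypothetical): a switch exposes a single management port 0, while a
 * CA or router exposes ports 1..phys_port_cnt, and any port that cannot
 * carry MAD traffic is skipped via rdma_cap_ib_mad().
 */
static void for_each_mad_port(struct ib_device *device,
			      void (*fn)(struct ib_device *, int))
{
	int start = (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
	int end = (device->node_type == RDMA_NODE_IB_SWITCH) ?
		  0 : device->phys_port_cnt;
	int i;

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;
		fn(device, i);
	}
}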
3120
3121static void ib_mad_remove_device(struct ib_device *device)
3122{
Michael Wang827f2a82015-05-05 14:50:20 +02003123 int start, end, i;
Steve Wise070e1402010-03-04 18:18:18 +00003124
Tom Tucker07ebafb2006-08-03 16:02:42 -05003125 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Michael Wang827f2a82015-05-05 14:50:20 +02003126 start = 0;
3127 end = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128 } else {
Michael Wang827f2a82015-05-05 14:50:20 +02003129 start = 1;
3130 end = device->phys_port_cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 }
Michael Wang827f2a82015-05-05 14:50:20 +02003132
3133 for (i = start; i <= end; i++) {
Michael Wangc757dea2015-05-05 14:50:32 +02003134 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003135 continue;
3136
3137 if (ib_agent_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003138 dev_err(&device->dev,
Michael Wang827f2a82015-05-05 14:50:20 +02003139 "Couldn't close port %d for agents\n", i);
3140 if (ib_mad_port_close(device, i))
3141 dev_err(&device->dev, "Couldn't close port %d\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 }
3143}
3144
3145static struct ib_client mad_client = {
3146 .name = "mad",
3147 .add = ib_mad_init_device,
3148 .remove = ib_mad_remove_device
3149};
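
/*
 * Sketch of the ib_client registration pattern (names hypothetical):
 * ib_register_client() invokes .add for every device already in the
 * system and again whenever one is added later; ib_unregister_client()
 * runs .remove for each device still registered.
 */
static void example_add_one(struct ib_device *device)
{
}

static void example_remove_one(struct ib_device *device)
{
}

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_add_one,
	.remove = example_remove_one
};

/* ib_register_client(&example_client); ... ib_unregister_client(&example_client); */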
3150
3151static int __init ib_mad_init_module(void)
3152{
3153 int ret;
3154
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07003155 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3156 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3157
3158 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3159 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3160
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 ib_mad_cache = kmem_cache_create("ib_mad",
3162 sizeof(struct ib_mad_private),
3163 0,
3164 SLAB_HWCACHE_ALIGN,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 NULL);
3166 if (!ib_mad_cache) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003167 pr_err("Couldn't create ib_mad cache\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 ret = -ENOMEM;
3169 goto error1;
3170 }
3171
3172 INIT_LIST_HEAD(&ib_mad_port_list);
3173
3174 if (ib_register_client(&mad_client)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003175 pr_err("Couldn't register ib_mad client\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 ret = -EINVAL;
3177 goto error2;
3178 }
3179
3180 return 0;
3181
3182error2:
3183 kmem_cache_destroy(ib_mad_cache);
3184error1:
3185 return ret;
3186}
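
/*
 * Sketch of the parameter clamping above: applying min() and then max()
 * pins a user-supplied module parameter into
 * [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE]; clamp() from linux/kernel.h
 * expresses the same bound in a single call.
 */
static int example_clamp_queue_size(int requested)
{
	return clamp(requested, IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE);
}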
3187
3188static void __exit ib_mad_cleanup_module(void)
3189{
3190 ib_unregister_client(&mad_client);
Alexey Dobriyan1a1d92c2006-09-27 01:49:40 -07003191 kmem_cache_destroy(ib_mad_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192}
3193
3194module_init(ib_mad_init_module);
3195module_exit(ib_mad_cleanup_module);