/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}
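
/*
 * Map a special QP type to its index in the port's qp_info[] array:
 * 0 for the SMI QP (QP0), 1 for the GSI QP (QP1), -1 otherwise.
 */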
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}
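
/* Is this management class in the "new" (OUI-qualified) vendor range? */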
static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}
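
/* An OUI of 00:00:00 is not a valid vendor OUI */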
static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
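
/*
 * Returns non-zero if the MAD is a response: a response method, a trap
 * repress, or a BM MAD with the response bit set in its attribute modifier.
 */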
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
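
/*
 * Reserve a slot in the per-QP snoop table, growing the table if needed.
 * Returns the slot index or a negative errno on allocation failure.
 */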
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
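
/*
 * Drop a reference on the MAD agent; the final reference completes
 * mad_agent_priv->comp so that unregister_mad_agent() can proceed.
 */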
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
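
/* Remove a work request from its MAD queue under the queue lock */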
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
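
/*
 * Deliver a send completion to every registered snoop agent whose flags
 * match; the snoop lock is dropped around each handler callback.
 */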
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
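
/*
 * Build a synthetic receive work completion for an SMP that is processed
 * locally, so it can be handed to the receive path as if it came off the wire.
 */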
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		dev_err(&device->dev, "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad.mad_hdr);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
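
/*
 * Compute the number of pad bytes needed so the RMPP data fills a whole
 * number of segments (a segment is the MAD payload left after the header).
 */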
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
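
/* True if RMPP for this agent is handled by the kernel rather than userspace */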
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
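
/*
 * DMA map the MAD header and payload and post the send work request, or
 * queue it on the overflow list if the send queue is already full.
 */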
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
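
/* Check whether any method requested in mad_reg_req is already claimed */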
1264static int method_in_use(struct ib_mad_mgmt_method_table **method,
1265 struct ib_mad_reg_req *mad_reg_req)
1266{
1267 int i;
1268
Akinobu Mita19b629f2010-03-05 13:41:38 -08001269 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 if ((*method)->agent[i]) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001271 pr_err("Method %d already in use\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 return -EINVAL;
1273 }
1274 }
1275 return 0;
1276}
1277
1278static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1279{
1280 /* Allocate management method table */
Roland Dreierde6eb662005-11-02 07:23:14 -08001281 *method = kzalloc(sizeof **method, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 if (!*method) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001283 pr_err("No memory for ib_mad_mgmt_method_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 return -ENOMEM;
1285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
1287 return 0;
1288}
1289
1290/*
1291 * Check to see if there are any methods still in use
1292 */
1293static int check_method_table(struct ib_mad_mgmt_method_table *method)
1294{
1295 int i;
1296
1297 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1298 if (method->agent[i])
1299 return 1;
1300 return 0;
1301}
1302
1303/*
1304 * Check to see if there are any method tables for this class still in use
1305 */
1306static int check_class_table(struct ib_mad_mgmt_class_table *class)
1307{
1308 int i;
1309
1310 for (i = 0; i < MAX_MGMT_CLASS; i++)
1311 if (class->method_table[i])
1312 return 1;
1313 return 0;
1314}
1315
1316static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1317{
1318 int i;
1319
1320 for (i = 0; i < MAX_MGMT_OUI; i++)
1321 if (vendor_class->method_table[i])
1322 return 1;
1323 return 0;
1324}
1325
1326static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
Ira Weinyd94bd262015-06-06 14:38:22 -04001327 const char *oui)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328{
1329 int i;
1330
1331 for (i = 0; i < MAX_MGMT_OUI; i++)
Roland Dreier3cd96562006-09-22 15:22:46 -07001332 /* Is there matching OUI for this vendor class ? */
1333 if (!memcmp(vendor_class->oui[i], oui, 3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 return i;
1335
1336 return -1;
1337}
1338
1339static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1340{
1341 int i;
1342
1343 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1344 if (vendor->vendor_class[i])
1345 return 1;
1346
1347 return 0;
1348}
1349
1350static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1351 struct ib_mad_agent_private *agent)
1352{
1353 int i;
1354
1355 /* Remove any methods for this mad agent */
1356 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1357 if (method->agent[i] == agent) {
1358 method->agent[i] = NULL;
1359 }
1360 }
1361}
1362
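/*
 * Register an agent's methods for a standard (non-vendor-OUI) management
 * class.  The per-version class table and the per-class method table are
 * allocated on demand and torn down again if the registration fails.
 */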
1363static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1364 struct ib_mad_agent_private *agent_priv,
1365 u8 mgmt_class)
1366{
1367 struct ib_mad_port_private *port_priv;
1368 struct ib_mad_mgmt_class_table **class;
1369 struct ib_mad_mgmt_method_table **method;
1370 int i, ret;
1371
1372 port_priv = agent_priv->qp_info->port_priv;
1373 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1374 if (!*class) {
1375 /* Allocate management class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001376 *class = kzalloc(sizeof **class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 if (!*class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001378 dev_err(&agent_priv->agent.device->dev,
1379 "No memory for ib_mad_mgmt_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 ret = -ENOMEM;
1381 goto error1;
1382 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001383
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 /* Allocate method table for this management class */
1385 method = &(*class)->method_table[mgmt_class];
1386 if ((ret = allocate_method_table(method)))
1387 goto error2;
1388 } else {
1389 method = &(*class)->method_table[mgmt_class];
1390 if (!*method) {
1391 /* Allocate method table for this management class */
1392 if ((ret = allocate_method_table(method)))
1393 goto error1;
1394 }
1395 }
1396
1397 /* Now, make sure methods are not already in use */
1398 if (method_in_use(method, mad_reg_req))
1399 goto error3;
1400
1401 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001402 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001404
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 return 0;
1406
1407error3:
1408 /* Remove any methods for this mad agent */
1409 remove_methods_mad_agent(*method, agent_priv);
1410 /* Now, check to see if there are any methods in use */
1411 if (!check_method_table(*method)) {
1412 /* If not, release management method table */
1413 kfree(*method);
1414 *method = NULL;
1415 }
1416 ret = -EINVAL;
1417 goto error1;
1418error2:
1419 kfree(*class);
1420 *class = NULL;
1421error1:
1422 return ret;
1423}
1424
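/*
 * Register an agent's methods for a vendor class in the "new" vendor
 * range (classes carrying an OUI).  An existing OUI slot is reused if
 * the OUI matches; otherwise a free slot is claimed and a method table
 * allocated for it.
 */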
1425static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1426 struct ib_mad_agent_private *agent_priv)
1427{
1428 struct ib_mad_port_private *port_priv;
1429 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1430 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1431 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1432 struct ib_mad_mgmt_method_table **method;
1433 int i, ret = -ENOMEM;
1434 u8 vclass;
1435
1436 /* "New" vendor (with OUI) class */
1437 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1438 port_priv = agent_priv->qp_info->port_priv;
1439 vendor_table = &port_priv->version[
1440 mad_reg_req->mgmt_class_version].vendor;
1441 if (!*vendor_table) {
1442 /* Allocate mgmt vendor class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001443 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 if (!vendor) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001445 dev_err(&agent_priv->agent.device->dev,
1446 "No memory for ib_mad_mgmt_vendor_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 goto error1;
1448 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 *vendor_table = vendor;
1451 }
1452 if (!(*vendor_table)->vendor_class[vclass]) {
1453 /* Allocate table for this management vendor class */
Roland Dreierde6eb662005-11-02 07:23:14 -08001454 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 if (!vendor_class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001456 dev_err(&agent_priv->agent.device->dev,
1457 "No memory for ib_mad_mgmt_vendor_class\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 goto error2;
1459 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 (*vendor_table)->vendor_class[vclass] = vendor_class;
1462 }
1463 for (i = 0; i < MAX_MGMT_OUI; i++) {
1464		/* Is there a matching OUI for this vendor class? */
1465 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1466 mad_reg_req->oui, 3)) {
1467 method = &(*vendor_table)->vendor_class[
1468 vclass]->method_table[i];
1469 BUG_ON(!*method);
1470 goto check_in_use;
1471 }
1472 }
1473 for (i = 0; i < MAX_MGMT_OUI; i++) {
1474		/* OUI slot available? */
1475 if (!is_vendor_oui((*vendor_table)->vendor_class[
1476 vclass]->oui[i])) {
1477 method = &(*vendor_table)->vendor_class[
1478 vclass]->method_table[i];
1479 BUG_ON(*method);
1480 /* Allocate method table for this OUI */
1481 if ((ret = allocate_method_table(method)))
1482 goto error3;
1483 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1484 mad_reg_req->oui, 3);
1485 goto check_in_use;
1486 }
1487 }
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001488 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 goto error3;
1490
1491check_in_use:
1492 /* Now, make sure methods are not already in use */
1493 if (method_in_use(method, mad_reg_req))
1494 goto error4;
1495
1496 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001497 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 return 0;
1501
1502error4:
1503 /* Remove any methods for this mad agent */
1504 remove_methods_mad_agent(*method, agent_priv);
1505 /* Now, check to see if there are any methods in use */
1506 if (!check_method_table(*method)) {
1507 /* If not, release management method table */
1508 kfree(*method);
1509 *method = NULL;
1510 }
1511 ret = -EINVAL;
1512error3:
1513 if (vendor_class) {
1514 (*vendor_table)->vendor_class[vclass] = NULL;
1515 kfree(vendor_class);
1516 }
1517error2:
1518 if (vendor) {
1519 *vendor_table = NULL;
1520 kfree(vendor);
1521 }
1522error1:
1523 return ret;
1524}
1525
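/*
 * Undo the registration done by add_nonoui_reg_req()/add_oui_reg_req():
 * remove the agent's methods and free any method, class, or vendor
 * tables that become empty as a result.
 */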
1526static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1527{
1528 struct ib_mad_port_private *port_priv;
1529 struct ib_mad_mgmt_class_table *class;
1530 struct ib_mad_mgmt_method_table *method;
1531 struct ib_mad_mgmt_vendor_class_table *vendor;
1532 struct ib_mad_mgmt_vendor_class *vendor_class;
1533 int index;
1534 u8 mgmt_class;
1535
1536 /*
1537	 * Was a MAD registration request supplied
1538	 * with the original registration?
1539 */
1540 if (!agent_priv->reg_req) {
1541 goto out;
1542 }
1543
1544 port_priv = agent_priv->qp_info->port_priv;
1545 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1546 class = port_priv->version[
1547 agent_priv->reg_req->mgmt_class_version].class;
1548 if (!class)
1549 goto vendor_check;
1550
1551 method = class->method_table[mgmt_class];
1552 if (method) {
1553 /* Remove any methods for this mad agent */
1554 remove_methods_mad_agent(method, agent_priv);
1555 /* Now, check to see if there are any methods still in use */
1556 if (!check_method_table(method)) {
1557 /* If not, release management method table */
1558 kfree(method);
1559 class->method_table[mgmt_class] = NULL;
1560			/* Any management classes left? */
1561 if (!check_class_table(class)) {
1562 /* If not, release management class table */
1563 kfree(class);
1564 port_priv->version[
1565 agent_priv->reg_req->
1566 mgmt_class_version].class = NULL;
1567 }
1568 }
1569 }
1570
1571vendor_check:
1572 if (!is_vendor_class(mgmt_class))
1573 goto out;
1574
1575 /* normalize mgmt_class to vendor range 2 */
1576 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1577 vendor = port_priv->version[
1578 agent_priv->reg_req->mgmt_class_version].vendor;
1579
1580 if (!vendor)
1581 goto out;
1582
1583 vendor_class = vendor->vendor_class[mgmt_class];
1584 if (vendor_class) {
1585 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1586 if (index < 0)
1587 goto out;
1588 method = vendor_class->method_table[index];
1589 if (method) {
1590 /* Remove any methods for this mad agent */
1591 remove_methods_mad_agent(method, agent_priv);
1592 /*
1593 * Now, check to see if there are
1594 * any methods still in use
1595 */
1596 if (!check_method_table(method)) {
1597 /* If not, release management method table */
1598 kfree(method);
1599 vendor_class->method_table[index] = NULL;
1600 memset(vendor_class->oui[index], 0, 3);
1601				/* Any OUIs left? */
1602 if (!check_vendor_class(vendor_class)) {
1603 /* If not, release vendor class table */
1604 kfree(vendor_class);
1605 vendor->vendor_class[mgmt_class] = NULL;
1606					/* Any other vendor classes left? */
1607 if (!check_vendor_table(vendor)) {
1608 kfree(vendor);
1609 port_priv->version[
1610 agent_priv->reg_req->
1611 mgmt_class_version].
1612 vendor = NULL;
1613 }
1614 }
1615 }
1616 }
1617 }
1618
1619out:
1620 return;
1621}
1622
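/*
 * Route a received MAD to the matching agent.  Responses are matched by
 * the high 32 bits of the transaction ID; requests are looked up in the
 * class/method (and, for vendor classes, OUI) tables built at
 * registration time.
 */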
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623static struct ib_mad_agent_private *
1624find_mad_agent(struct ib_mad_port_private *port_priv,
Ira Weinyd94bd262015-06-06 14:38:22 -04001625 const struct ib_mad_hdr *mad_hdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626{
1627 struct ib_mad_agent_private *mad_agent = NULL;
1628 unsigned long flags;
1629
1630 spin_lock_irqsave(&port_priv->reg_lock, flags);
Ira Weinyd94bd262015-06-06 14:38:22 -04001631 if (ib_response_mad(mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 u32 hi_tid;
1633 struct ib_mad_agent_private *entry;
1634
1635 /*
1636 * Routing is based on high 32 bits of transaction ID
1637 * of MAD.
1638 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001639 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
Sean Hefty34816ad2005-10-25 10:51:39 -07001640 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 if (entry->agent.hi_tid == hi_tid) {
1642 mad_agent = entry;
1643 break;
1644 }
1645 }
1646 } else {
1647 struct ib_mad_mgmt_class_table *class;
1648 struct ib_mad_mgmt_method_table *method;
1649 struct ib_mad_mgmt_vendor_class_table *vendor;
1650 struct ib_mad_mgmt_vendor_class *vendor_class;
Ira Weinyd94bd262015-06-06 14:38:22 -04001651 const struct ib_vendor_mad *vendor_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 int index;
1653
1654 /*
1655 * Routing is based on version, class, and method
1656 * For "newer" vendor MADs, also based on OUI
1657 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001658 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001660 if (!is_vendor_class(mad_hdr->mgmt_class)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 class = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001662 mad_hdr->class_version].class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 if (!class)
1664 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001665 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
Hefty, Seanb7ab0b12011-10-06 09:33:05 -07001666 IB_MGMT_MAX_METHODS)
1667 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 method = class->method_table[convert_mgmt_class(
Ira Weinyd94bd262015-06-06 14:38:22 -04001669 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 if (method)
Ira Weinyd94bd262015-06-06 14:38:22 -04001671 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 ~IB_MGMT_METHOD_RESP];
1673 } else {
1674 vendor = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001675 mad_hdr->class_version].vendor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 if (!vendor)
1677 goto out;
1678 vendor_class = vendor->vendor_class[vendor_class_index(
Ira Weinyd94bd262015-06-06 14:38:22 -04001679 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 if (!vendor_class)
1681 goto out;
1682 /* Find matching OUI */
Ira Weinyd94bd262015-06-06 14:38:22 -04001683 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1685 if (index == -1)
1686 goto out;
1687 method = vendor_class->method_table[index];
1688 if (method) {
Ira Weinyd94bd262015-06-06 14:38:22 -04001689 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 ~IB_MGMT_METHOD_RESP];
1691 }
1692 }
1693 }
1694
1695 if (mad_agent) {
1696 if (mad_agent->agent.recv_handler)
1697 atomic_inc(&mad_agent->refcount);
1698 else {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001699 dev_notice(&port_priv->device->dev,
1700 "No receive handler for client %p on port %d\n",
1701 &mad_agent->agent, port_priv->port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 mad_agent = NULL;
1703 }
1704 }
1705out:
1706 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1707
1708 return mad_agent;
1709}
1710
Ira Weiny77f60832015-05-08 14:27:21 -04001711static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712{
1713 int valid = 0;
1714
1715 /* Make sure MAD base version is understood */
Ira Weiny77f60832015-05-08 14:27:21 -04001716 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001717 pr_err("MAD received with unsupported base version %d\n",
Ira Weiny77f60832015-05-08 14:27:21 -04001718 mad_hdr->base_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 goto out;
1720 }
1721
1722 /* Filter SMI packets sent to other than QP0 */
Ira Weiny77f60832015-05-08 14:27:21 -04001723 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1724 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 if (qp_num == 0)
1726 valid = 1;
1727 } else {
1728 /* Filter GSI packets sent to QP0 */
1729 if (qp_num != 0)
1730 valid = 1;
1731 }
1732
1733out:
1734 return valid;
1735}
1736
Ira Weinyf766c582015-05-08 14:27:24 -04001737static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1738 const struct ib_mad_hdr *mad_hdr)
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001739{
1740 struct ib_rmpp_mad *rmpp_mad;
1741
1742 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1743 return !mad_agent_priv->agent.rmpp_version ||
Ira Weiny1471cb62014-08-08 19:00:56 -04001744 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001745 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1746 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1747 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1748}
1749
Ira Weiny8bf4b302015-05-08 14:27:23 -04001750static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1751 const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001752{
Ira Weiny8bf4b302015-05-08 14:27:23 -04001753 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001754 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1755}
1756
Ira Weinyf766c582015-05-08 14:27:24 -04001757static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1758 const struct ib_mad_send_wr_private *wr,
1759				       const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001760{
1761 struct ib_ah_attr attr;
1762 u8 send_resp, rcv_resp;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001763 union ib_gid sgid;
1764 struct ib_device *device = mad_agent_priv->agent.device;
1765 u8 port_num = mad_agent_priv->agent.port_num;
1766 u8 lmc;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001767
Ira Weiny96909302015-05-08 14:27:22 -04001768 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1769 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001770
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001771 if (send_resp == rcv_resp)
1772 /* both requests, or both responses. GIDs different */
1773 return 0;
1774
1775 if (ib_query_ah(wr->send_buf.ah, &attr))
1776 /* Assume not equal, to avoid false positives. */
1777 return 0;
1778
Jack Morgenstein9874e742006-06-17 20:37:34 -07001779 if (!!(attr.ah_flags & IB_AH_GRH) !=
1780 !!(rwc->wc->wc_flags & IB_WC_GRH))
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001781 /* one has GID, other does not. Assume different */
1782 return 0;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001783
1784 if (!send_resp && rcv_resp) {
1785 /* is request/response. */
1786 if (!(attr.ah_flags & IB_AH_GRH)) {
1787 if (ib_get_cached_lmc(device, port_num, &lmc))
1788 return 0;
1789 return (!lmc || !((attr.src_path_bits ^
1790 rwc->wc->dlid_path_bits) &
1791 ((1 << lmc) - 1)));
1792 } else {
1793 if (ib_get_cached_gid(device, port_num,
1794 attr.grh.sgid_index, &sgid))
1795 return 0;
1796 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1797 16);
1798 }
1799 }
1800
1801 if (!(attr.ah_flags & IB_AH_GRH))
1802 return attr.dlid == rwc->wc->slid;
1803 else
1804 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1805 16);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001806}
Jack Morgenstein9874e742006-06-17 20:37:34 -07001807
1808static inline int is_direct(u8 class)
1809{
1810 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1811}
1812
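/*
 * Find the send request that a received response completes.  Matching is
 * by transaction ID and management class, plus a GID/path check (skipped
 * for directed-route MADs, which may carry permissive LIDs).  The wait
 * list is searched first; the send list is also checked because a
 * response can arrive before the send completion has been processed.
 */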
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001813struct ib_mad_send_wr_private*
Ira Weinyf766c582015-05-08 14:27:24 -04001814ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1815 const struct ib_mad_recv_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816{
Jack Morgenstein9874e742006-06-17 20:37:34 -07001817 struct ib_mad_send_wr_private *wr;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001818 struct ib_mad *mad;
1819
Jack Morgenstein9874e742006-06-17 20:37:34 -07001820 mad = (struct ib_mad *)wc->recv_buf.mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
Jack Morgenstein9874e742006-06-17 20:37:34 -07001822 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1823 if ((wr->tid == mad->mad_hdr.tid) &&
1824 rcv_has_same_class(wr, wc) &&
1825 /*
1826 * Don't check GID for direct routed MADs.
1827 * These might have permissive LIDs.
1828 */
1829 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1830 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Roland Dreier39798692006-11-13 09:38:07 -08001831 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 }
1833
1834 /*
1835 * It's possible to receive the response before we've
1836 * been notified that the send has completed
1837 */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001838 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04001839 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001840 wr->tid == mad->mad_hdr.tid &&
1841 wr->timeout &&
1842 rcv_has_same_class(wr, wc) &&
1843 /*
1844 * Don't check GID for direct routed MADs.
1845 * These might have permissive LIDs.
1846 */
1847 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1848 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 /* Verify request has not been canceled */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001850 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 }
1852 return NULL;
1853}
1854
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001855void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001856{
1857 mad_send_wr->timeout = 0;
Akinobu Mita179e0912006-06-26 00:24:41 -07001858 if (mad_send_wr->refcount == 1)
1859 list_move_tail(&mad_send_wr->agent_list,
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001860 &mad_send_wr->mad_agent_priv->done_list);
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001861}
1862
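/*
 * Deliver a received MAD to its agent.  For kernel RMPP agents the MAD
 * is first run through RMPP reassembly; responses complete the matching
 * send, with the response delivered to the client before the send
 * completion is reported.
 */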
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001864 struct ib_mad_recv_wc *mad_recv_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865{
1866 struct ib_mad_send_wr_private *mad_send_wr;
1867 struct ib_mad_send_wc mad_send_wc;
1868 unsigned long flags;
1869
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001870 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1871 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
Ira Weiny1471cb62014-08-08 19:00:56 -04001872 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001873 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1874 mad_recv_wc);
1875 if (!mad_recv_wc) {
Sean Hefty1b52fa982006-05-12 14:57:52 -07001876 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001877 return;
1878 }
1879 }
1880
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 /* Complete corresponding request */
Ira Weiny96909302015-05-08 14:27:22 -04001882 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001884 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 if (!mad_send_wr) {
1886 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04001887 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1888 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1889 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1890 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1891 /* user rmpp is in effect
1892 * and this is an active RMPP MAD
1893 */
1894 mad_recv_wc->wc->wr_id = 0;
1895 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1896 mad_recv_wc);
1897 atomic_dec(&mad_agent_priv->refcount);
1898 } else {
1899 /* not user rmpp, revert to normal behavior and
1900 * drop the mad */
1901 ib_free_recv_mad(mad_recv_wc);
1902 deref_mad_agent(mad_agent_priv);
1903 return;
1904 }
1905 } else {
1906 ib_mark_mad_done(mad_send_wr);
1907 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1908
1909 /* Defined behavior is to complete response before request */
1910 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1911 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1912 mad_recv_wc);
1913 atomic_dec(&mad_agent_priv->refcount);
1914
1915 mad_send_wc.status = IB_WC_SUCCESS;
1916 mad_send_wc.vendor_err = 0;
1917 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1918 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 } else {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001921 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1922 mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -07001923 deref_mad_agent(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 }
1925}
1926
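/*
 * No agent claimed this MAD.  For Get/Set requests, build a GetResp with
 * "unsupported method/attribute" status so the sender is not left
 * waiting for a reply that will never come.
 */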
Swapna Thete0b307042012-02-25 17:47:32 -08001927static bool generate_unmatched_resp(struct ib_mad_private *recv,
1928 struct ib_mad_private *response)
1929{
1930 if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1931 recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1932 memcpy(response, recv, sizeof *response);
1933 response->header.recv_wc.wc = &response->header.wc;
1934 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1935 response->header.recv_wc.recv_buf.grh = &response->grh;
1936 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1937 response->mad.mad.mad_hdr.status =
1938 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
Jack Morgenstein840777d2012-04-24 16:06:50 -07001939 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1940 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
Swapna Thete0b307042012-02-25 17:47:32 -08001941
1942 return true;
1943 } else {
1944 return false;
1945 }
1946}
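
/*
 * Receive completion handler: unmap and validate the MAD, apply directed
 * route SMP handling (consume, forward, or process locally), give the
 * driver's process_mad() first refusal, then hand the MAD to the
 * matching agent or generate an unmatched response.
 */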
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1948 struct ib_wc *wc)
1949{
1950 struct ib_mad_qp_info *qp_info;
1951 struct ib_mad_private_header *mad_priv_hdr;
Hal Rosenstock445d6802007-08-03 10:45:17 -07001952 struct ib_mad_private *recv, *response = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 struct ib_mad_list_head *mad_list;
1954 struct ib_mad_agent_private *mad_agent;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001955 int port_num;
Jack Morgensteina9e74322012-04-24 16:08:57 -07001956 int ret = IB_MAD_RESULT_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1959 qp_info = mad_list->mad_queue->qp_info;
1960 dequeue_mad(mad_list);
1961
1962 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1963 mad_list);
1964 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
Ralph Campbell15271062006-12-12 14:28:30 -08001965 ib_dma_unmap_single(port_priv->device,
1966 recv->header.mapping,
1967 sizeof(struct ib_mad_private) -
1968 sizeof(struct ib_mad_private_header),
1969 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
1971 /* Setup MAD receive work completion from "normal" work completion */
Sean Hefty24239af2005-04-16 15:26:08 -07001972 recv->header.wc = *wc;
1973 recv->header.recv_wc.wc = &recv->header.wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1975 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1976 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1977
1978 if (atomic_read(&qp_info->snoop_count))
1979 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1980
1981 /* Validate MAD */
Ira Weiny77f60832015-05-08 14:27:21 -04001982 if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 goto out;
1984
Hal Rosenstock445d6802007-08-03 10:45:17 -07001985 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1986 if (!response) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001987 dev_err(&port_priv->device->dev,
1988 "ib_mad_recv_done_handler no memory for response buffer\n");
Hal Rosenstock445d6802007-08-03 10:45:17 -07001989 goto out;
1990 }
1991
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001992 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1993 port_num = wc->port_num;
1994 else
1995 port_num = port_priv->port_num;
1996
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 if (recv->mad.mad.mad_hdr.mgmt_class ==
1998 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001999 enum smi_forward_action retsmi;
2000
Hal Rosenstockde493d42007-04-02 11:24:07 -04002001 if (smi_handle_dr_smp_recv(&recv->mad.smp,
2002 port_priv->device->node_type,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002003 port_num,
Hal Rosenstockde493d42007-04-02 11:24:07 -04002004 port_priv->device->phys_port_cnt) ==
2005 IB_SMI_DISCARD)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 goto out;
Hal Rosenstockde493d42007-04-02 11:24:07 -04002007
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002008 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
2009 if (retsmi == IB_SMI_LOCAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 goto local;
Hal Rosenstockde493d42007-04-02 11:24:07 -04002011
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002012 if (retsmi == IB_SMI_SEND) { /* don't forward */
2013 if (smi_handle_dr_smp_send(&recv->mad.smp,
2014 port_priv->device->node_type,
2015 port_num) == IB_SMI_DISCARD)
2016 goto out;
Hal Rosenstockde493d42007-04-02 11:24:07 -04002017
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002018 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
2019 goto out;
2020 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
2021 /* forward case for switches */
2022 memcpy(response, recv, sizeof(*response));
2023 response->header.recv_wc.wc = &response->header.wc;
2024 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
2025 response->header.recv_wc.recv_buf.grh = &response->grh;
2026
Hal Rosenstock86dfbec2007-08-03 10:45:17 -07002027 agent_send_response(&response->mad.mad,
2028 &response->grh, wc,
2029 port_priv->device,
2030 smi_get_fwd_port(&recv->mad.smp),
2031 qp_info->qp->qp_num);
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002032
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 goto out;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 }
2036
2037local:
2038 /* Give driver "right of first refusal" on incoming MAD */
2039 if (port_priv->device->process_mad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 ret = port_priv->device->process_mad(port_priv->device, 0,
2041 port_priv->port_num,
2042 wc, &recv->grh,
2043 &recv->mad.mad,
2044 &response->mad.mad);
2045 if (ret & IB_MAD_RESULT_SUCCESS) {
2046 if (ret & IB_MAD_RESULT_CONSUMED)
2047 goto out;
2048 if (ret & IB_MAD_RESULT_REPLY) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002049 agent_send_response(&response->mad.mad,
2050 &recv->grh, wc,
2051 port_priv->device,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002052 port_num,
Sean Hefty34816ad2005-10-25 10:51:39 -07002053 qp_info->qp->qp_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 goto out;
2055 }
2056 }
2057 }
2058
Ira Weinyd94bd262015-06-06 14:38:22 -04002059 mad_agent = find_mad_agent(port_priv, &recv->mad.mad.mad_hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 if (mad_agent) {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002061 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 /*
2063 * recv is freed up in error cases in ib_mad_complete_recv
2064 * or via recv_handler in ib_mad_complete_recv()
2065 */
2066 recv = NULL;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002067 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2068 generate_unmatched_resp(recv, response)) {
Swapna Thete0b307042012-02-25 17:47:32 -08002069 agent_send_response(&response->mad.mad, &recv->grh, wc,
2070 port_priv->device, port_num, qp_info->qp->qp_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 }
2072
2073out:
2074 /* Post another receive request for this QP */
2075 if (response) {
2076 ib_mad_post_receive_mads(qp_info, response);
2077 if (recv)
2078 kmem_cache_free(ib_mad_cache, recv);
2079 } else
2080 ib_mad_post_receive_mads(qp_info, recv);
2081}
2082
2083static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2084{
2085 struct ib_mad_send_wr_private *mad_send_wr;
2086 unsigned long delay;
2087
2088 if (list_empty(&mad_agent_priv->wait_list)) {
Tejun Heo136b5722012-08-21 13:18:24 -07002089 cancel_delayed_work(&mad_agent_priv->timed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 } else {
2091 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2092 struct ib_mad_send_wr_private,
2093 agent_list);
2094
2095 if (time_after(mad_agent_priv->timeout,
2096 mad_send_wr->timeout)) {
2097 mad_agent_priv->timeout = mad_send_wr->timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 delay = mad_send_wr->timeout - jiffies;
2099 if ((long)delay <= 0)
2100 delay = 1;
Tejun Heoe7c2f962012-08-21 13:18:24 -07002101 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2102 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 }
2104 }
2105}
2106
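/*
 * Move a send that is waiting for a response onto the wait list, which
 * is kept sorted by absolute timeout, and reschedule the timeout work if
 * this send now expires first.
 */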
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002107static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002109 struct ib_mad_agent_private *mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 struct ib_mad_send_wr_private *temp_mad_send_wr;
2111 struct list_head *list_item;
2112 unsigned long delay;
2113
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002114 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 list_del(&mad_send_wr->agent_list);
2116
2117 delay = mad_send_wr->timeout;
2118 mad_send_wr->timeout += jiffies;
2119
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002120 if (delay) {
2121 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2122 temp_mad_send_wr = list_entry(list_item,
2123 struct ib_mad_send_wr_private,
2124 agent_list);
2125 if (time_after(mad_send_wr->timeout,
2126 temp_mad_send_wr->timeout))
2127 break;
2128 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 }
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002130 else
2131 list_item = &mad_agent_priv->wait_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 list_add(&mad_send_wr->agent_list, list_item);
2133
2134 /* Reschedule a work item if we have a shorter timeout */
Tejun Heoe7c2f962012-08-21 13:18:24 -07002135 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2136 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2137 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138}
2139
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002140void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2141 int timeout_ms)
2142{
2143 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2144 wait_for_response(mad_send_wr);
2145}
2146
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147/*
2148 * Process a send work completion
2149 */
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002150void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2151 struct ib_mad_send_wc *mad_send_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152{
2153 struct ib_mad_agent_private *mad_agent_priv;
2154 unsigned long flags;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002155 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002157 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04002159 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002160 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2161 if (ret == IB_RMPP_RESULT_CONSUMED)
2162 goto done;
2163 } else
2164 ret = IB_RMPP_RESULT_UNHANDLED;
2165
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 if (mad_send_wc->status != IB_WC_SUCCESS &&
2167 mad_send_wr->status == IB_WC_SUCCESS) {
2168 mad_send_wr->status = mad_send_wc->status;
2169 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2170 }
2171
2172 if (--mad_send_wr->refcount > 0) {
2173 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2174 mad_send_wr->status == IB_WC_SUCCESS) {
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002175 wait_for_response(mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002177 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 }
2179
2180 /* Remove send from MAD agent and notify client of completion */
2181 list_del(&mad_send_wr->agent_list);
2182 adjust_timeout(mad_agent_priv);
2183 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2184
2185	if (mad_send_wr->status != IB_WC_SUCCESS)
2186 mad_send_wc->status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002187 if (ret == IB_RMPP_RESULT_INTERNAL)
2188 ib_rmpp_send_handler(mad_send_wc);
2189 else
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002190 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2191 mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
2193 /* Release reference on agent taken when sending */
Sean Hefty1b52fa982006-05-12 14:57:52 -07002194 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002195 return;
2196done:
2197 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198}
2199
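/*
 * Send completion handler: unmap the send buffers, move a queued
 * (overflow) send onto the hardware send queue if one is waiting, and
 * complete the finished work request.
 */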
2200static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2201 struct ib_wc *wc)
2202{
2203 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2204 struct ib_mad_list_head *mad_list;
2205 struct ib_mad_qp_info *qp_info;
2206 struct ib_mad_queue *send_queue;
2207 struct ib_send_wr *bad_send_wr;
Sean Hefty34816ad2005-10-25 10:51:39 -07002208 struct ib_mad_send_wc mad_send_wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 unsigned long flags;
2210 int ret;
2211
2212 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2213 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2214 mad_list);
2215 send_queue = mad_list->mad_queue;
2216 qp_info = send_queue->qp_info;
2217
2218retry:
Ralph Campbell15271062006-12-12 14:28:30 -08002219 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2220 mad_send_wr->header_mapping,
2221 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2222 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2223 mad_send_wr->payload_mapping,
2224 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 queued_send_wr = NULL;
2226 spin_lock_irqsave(&send_queue->lock, flags);
2227 list_del(&mad_list->list);
2228
2229 /* Move queued send to the send queue */
2230 if (send_queue->count-- > send_queue->max_active) {
2231 mad_list = container_of(qp_info->overflow_list.next,
2232 struct ib_mad_list_head, list);
2233 queued_send_wr = container_of(mad_list,
2234 struct ib_mad_send_wr_private,
2235 mad_list);
Akinobu Mita179e0912006-06-26 00:24:41 -07002236 list_move_tail(&mad_list->list, &send_queue->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 }
2238 spin_unlock_irqrestore(&send_queue->lock, flags);
2239
Sean Hefty34816ad2005-10-25 10:51:39 -07002240 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2241 mad_send_wc.status = wc->status;
2242 mad_send_wc.vendor_err = wc->vendor_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 if (atomic_read(&qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002244 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 IB_MAD_SNOOP_SEND_COMPLETIONS);
Sean Hefty34816ad2005-10-25 10:51:39 -07002246 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
2248 if (queued_send_wr) {
2249 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
Sean Hefty34816ad2005-10-25 10:51:39 -07002250 &bad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002252 dev_err(&port_priv->device->dev,
2253 "ib_post_send failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 mad_send_wr = queued_send_wr;
2255 wc->status = IB_WC_LOC_QP_OP_ERR;
2256 goto retry;
2257 }
2258 }
2259}
2260
2261static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2262{
2263 struct ib_mad_send_wr_private *mad_send_wr;
2264 struct ib_mad_list_head *mad_list;
2265 unsigned long flags;
2266
2267 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2268 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2269 mad_send_wr = container_of(mad_list,
2270 struct ib_mad_send_wr_private,
2271 mad_list);
2272 mad_send_wr->retry = 1;
2273 }
2274 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2275}
2276
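/*
 * Handle a completion with error status.  Receive errors are left to the
 * QP error handling/shutdown path.  For sends, flush errors are reposted
 * once if marked for retry; any other error means the QP moved to SQE,
 * so it is transitioned back to RTS and the flushed sends are marked for
 * retry.
 */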
2277static void mad_error_handler(struct ib_mad_port_private *port_priv,
2278 struct ib_wc *wc)
2279{
2280 struct ib_mad_list_head *mad_list;
2281 struct ib_mad_qp_info *qp_info;
2282 struct ib_mad_send_wr_private *mad_send_wr;
2283 int ret;
2284
2285 /* Determine if failure was a send or receive */
2286 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2287 qp_info = mad_list->mad_queue->qp_info;
2288 if (mad_list->mad_queue == &qp_info->recv_queue)
2289 /*
2290 * Receive errors indicate that the QP has entered the error
2291 * state - error handling/shutdown code will cleanup
2292 */
2293 return;
2294
2295 /*
2296 * Send errors will transition the QP to SQE - move
2297 * QP to RTS and repost flushed work requests
2298 */
2299 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2300 mad_list);
2301 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2302 if (mad_send_wr->retry) {
2303 /* Repost send */
2304 struct ib_send_wr *bad_send_wr;
2305
2306 mad_send_wr->retry = 0;
2307 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2308 &bad_send_wr);
2309 if (ret)
2310 ib_mad_send_done_handler(port_priv, wc);
2311 } else
2312 ib_mad_send_done_handler(port_priv, wc);
2313 } else {
2314 struct ib_qp_attr *attr;
2315
2316 /* Transition QP to RTS and fail offending send */
2317 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2318 if (attr) {
2319 attr->qp_state = IB_QPS_RTS;
2320 attr->cur_qp_state = IB_QPS_SQE;
2321 ret = ib_modify_qp(qp_info->qp, attr,
2322 IB_QP_STATE | IB_QP_CUR_STATE);
2323 kfree(attr);
2324 if (ret)
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002325 dev_err(&port_priv->device->dev,
2326 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2327 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 else
2329 mark_sends_for_retry(qp_info);
2330 }
2331 ib_mad_send_done_handler(port_priv, wc);
2332 }
2333}
2334
2335/*
2336 * IB MAD completion callback
2337 */
David Howellsc4028952006-11-22 14:57:56 +00002338static void ib_mad_completion_handler(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339{
2340 struct ib_mad_port_private *port_priv;
2341 struct ib_wc wc;
2342
David Howellsc4028952006-11-22 14:57:56 +00002343 port_priv = container_of(work, struct ib_mad_port_private, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2345
2346 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2347 if (wc.status == IB_WC_SUCCESS) {
2348 switch (wc.opcode) {
2349 case IB_WC_SEND:
2350 ib_mad_send_done_handler(port_priv, &wc);
2351 break;
2352 case IB_WC_RECV:
2353 ib_mad_recv_done_handler(port_priv, &wc);
2354 break;
2355 default:
2356 BUG_ON(1);
2357 break;
2358 }
2359 } else
2360 mad_error_handler(port_priv, &wc);
2361 }
2362}
2363
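/*
 * Flush every outstanding send for an agent: mark active sends as
 * flushed, empty the wait list so no receive can match them, and report
 * each one to the client with IB_WC_WR_FLUSH_ERR.
 */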
2364static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2365{
2366 unsigned long flags;
2367 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2368 struct ib_mad_send_wc mad_send_wc;
2369 struct list_head cancel_list;
2370
2371 INIT_LIST_HEAD(&cancel_list);
2372
2373 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2374 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2375 &mad_agent_priv->send_list, agent_list) {
2376 if (mad_send_wr->status == IB_WC_SUCCESS) {
Roland Dreier3cd96562006-09-22 15:22:46 -07002377 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2379 }
2380 }
2381
2382 /* Empty wait list to prevent receives from finding a request */
2383 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2384 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2385
2386 /* Report all cancelled requests */
2387 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2388 mad_send_wc.vendor_err = 0;
2389
2390 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2391 &cancel_list, agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002392 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2393 list_del(&mad_send_wr->agent_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2395 &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 atomic_dec(&mad_agent_priv->refcount);
2397 }
2398}
2399
2400static struct ib_mad_send_wr_private*
Sean Hefty34816ad2005-10-25 10:51:39 -07002401find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2402 struct ib_mad_send_buf *send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403{
2404 struct ib_mad_send_wr_private *mad_send_wr;
2405
2406 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2407 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002408 if (&mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 return mad_send_wr;
2410 }
2411
2412 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2413 agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04002414 if (is_rmpp_data_mad(mad_agent_priv,
2415 mad_send_wr->send_buf.mad) &&
Sean Hefty34816ad2005-10-25 10:51:39 -07002416 &mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 return mad_send_wr;
2418 }
2419 return NULL;
2420}
2421
Sean Hefty34816ad2005-10-25 10:51:39 -07002422int ib_modify_mad(struct ib_mad_agent *mad_agent,
2423 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424{
2425 struct ib_mad_agent_private *mad_agent_priv;
2426 struct ib_mad_send_wr_private *mad_send_wr;
2427 unsigned long flags;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002428 int active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429
2430 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2431 agent);
2432 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Sean Hefty34816ad2005-10-25 10:51:39 -07002433 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002434 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002436 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 }
2438
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002439 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002440 if (!timeout_ms) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002442 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 }
2444
Sean Hefty34816ad2005-10-25 10:51:39 -07002445 mad_send_wr->send_buf.timeout_ms = timeout_ms;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002446 if (active)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002447 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2448 else
2449 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002451 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2452 return 0;
2453}
2454EXPORT_SYMBOL(ib_modify_mad);
2455
Sean Hefty34816ad2005-10-25 10:51:39 -07002456void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2457 struct ib_mad_send_buf *send_buf)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002458{
Sean Hefty34816ad2005-10-25 10:51:39 -07002459 ib_modify_mad(mad_agent, send_buf, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460}
2461EXPORT_SYMBOL(ib_cancel_mad);
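/*
 * Usage sketch (assuming a send_buf created with ib_create_send_mad()
 * and posted with ib_post_send_mad()): ib_modify_mad(agent, send_buf,
 * 2000) extends the response timeout to 2 seconds, while
 * ib_cancel_mad(agent, send_buf) aborts the send, which then completes
 * to the client's send handler with IB_WC_WR_FLUSH_ERR.
 */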
2462
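/*
 * Work handler for MADs that were processed locally (e.g. SMPs destined
 * for this port): deliver the receive to the destination agent, then
 * report the send completion to the sender.
 */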
David Howellsc4028952006-11-22 14:57:56 +00002463static void local_completions(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464{
2465 struct ib_mad_agent_private *mad_agent_priv;
2466 struct ib_mad_local_private *local;
2467 struct ib_mad_agent_private *recv_mad_agent;
2468 unsigned long flags;
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002469 int free_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 struct ib_wc wc;
2471 struct ib_mad_send_wc mad_send_wc;
2472
David Howellsc4028952006-11-22 14:57:56 +00002473 mad_agent_priv =
2474 container_of(work, struct ib_mad_agent_private, local_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
2476 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2477 while (!list_empty(&mad_agent_priv->local_list)) {
2478 local = list_entry(mad_agent_priv->local_list.next,
2479 struct ib_mad_local_private,
2480 completion_list);
Michael S. Tsirkin37289ef2006-03-30 15:52:54 +02002481 list_del(&local->completion_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002483 free_mad = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 if (local->mad_priv) {
2485 recv_mad_agent = local->recv_mad_agent;
2486 if (!recv_mad_agent) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002487 dev_err(&mad_agent_priv->agent.device->dev,
2488 "No receive MAD agent for local completion\n");
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002489 free_mad = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 goto local_send_completion;
2491 }
2492
2493 /*
2494 * Defined behavior is to complete response
2495 * before request
2496 */
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +02002497 build_smp_wc(recv_mad_agent->agent.qp,
2498 (unsigned long) local->mad_send_wr,
Sean Hefty97f52eb2005-08-13 21:05:57 -07002499 be16_to_cpu(IB_LID_PERMISSIVE),
Sean Hefty34816ad2005-10-25 10:51:39 -07002500 0, recv_mad_agent->agent.port_num, &wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501
2502 local->mad_priv->header.recv_wc.wc = &wc;
2503 local->mad_priv->header.recv_wc.mad_len =
2504 sizeof(struct ib_mad);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002505 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2506 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2507 &local->mad_priv->header.recv_wc.rmpp_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2509 local->mad_priv->header.recv_wc.recv_buf.mad =
2510 &local->mad_priv->mad.mad;
2511 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2512 snoop_recv(recv_mad_agent->qp_info,
2513 &local->mad_priv->header.recv_wc,
2514 IB_MAD_SNOOP_RECVS);
2515 recv_mad_agent->agent.recv_handler(
2516 &recv_mad_agent->agent,
2517 &local->mad_priv->header.recv_wc);
2518 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2519 atomic_dec(&recv_mad_agent->refcount);
2520 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2521 }
2522
2523local_send_completion:
2524 /* Complete send */
2525 mad_send_wc.status = IB_WC_SUCCESS;
2526 mad_send_wc.vendor_err = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07002527 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002529 snoop_send(mad_agent_priv->qp_info,
2530 &local->mad_send_wr->send_buf,
2531 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2533 &mad_send_wc);
2534
2535 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 atomic_dec(&mad_agent_priv->refcount);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002537 if (free_mad)
Hal Rosenstock2c153b92005-07-27 11:45:31 -07002538 kmem_cache_free(ib_mad_cache, local->mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 kfree(local);
2540 }
2541 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2542}
2543
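/*
 * Attempt to resend a timed-out MAD.  Fails with -ETIMEDOUT once the
 * retry budget is exhausted; otherwise the send is reposted (via RMPP if
 * the agent uses kernel RMPP) and put back on the send list.
 */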
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002544static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2545{
2546 int ret;
2547
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002548 if (!mad_send_wr->retries_left)
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002549 return -ETIMEDOUT;
2550
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002551 mad_send_wr->retries_left--;
2552 mad_send_wr->send_buf.retries++;
2553
Sean Hefty34816ad2005-10-25 10:51:39 -07002554 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002555
Ira Weiny1471cb62014-08-08 19:00:56 -04002556 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002557 ret = ib_retry_rmpp(mad_send_wr);
2558 switch (ret) {
2559 case IB_RMPP_RESULT_UNHANDLED:
2560 ret = ib_send_mad(mad_send_wr);
2561 break;
2562 case IB_RMPP_RESULT_CONSUMED:
2563 ret = 0;
2564 break;
2565 default:
2566 ret = -ECOMM;
2567 break;
2568 }
2569 } else
2570 ret = ib_send_mad(mad_send_wr);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002571
2572 if (!ret) {
2573 mad_send_wr->refcount++;
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002574 list_add_tail(&mad_send_wr->agent_list,
2575 &mad_send_wr->mad_agent_priv->send_list);
2576 }
2577 return ret;
2578}
2579
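/*
 * Delayed-work handler for response timeouts: retry sends whose timeout
 * has expired, report IB_WC_RESP_TIMEOUT_ERR for those that cannot be
 * retried, and reschedule itself for the next pending timeout.
 */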
David Howellsc4028952006-11-22 14:57:56 +00002580static void timeout_sends(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581{
2582 struct ib_mad_agent_private *mad_agent_priv;
2583 struct ib_mad_send_wr_private *mad_send_wr;
2584 struct ib_mad_send_wc mad_send_wc;
2585 unsigned long flags, delay;
2586
David Howellsc4028952006-11-22 14:57:56 +00002587 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2588 timed_work.work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 mad_send_wc.vendor_err = 0;
2590
2591 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2592 while (!list_empty(&mad_agent_priv->wait_list)) {
2593 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2594 struct ib_mad_send_wr_private,
2595 agent_list);
2596
2597 if (time_after(mad_send_wr->timeout, jiffies)) {
2598 delay = mad_send_wr->timeout - jiffies;
2599 if ((long)delay <= 0)
2600 delay = 1;
2601 queue_delayed_work(mad_agent_priv->qp_info->
2602 port_priv->wq,
2603 &mad_agent_priv->timed_work, delay);
2604 break;
2605 }
2606
Hal Rosenstockdbf92272005-07-27 11:45:30 -07002607 list_del(&mad_send_wr->agent_list);
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002608 if (mad_send_wr->status == IB_WC_SUCCESS &&
2609 !retry_send(mad_send_wr))
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002610 continue;
2611
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2613
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002614 if (mad_send_wr->status == IB_WC_SUCCESS)
2615 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2616 else
2617 mad_send_wc.status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002618 mad_send_wc.send_buf = &mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2620 &mad_send_wc);
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 atomic_dec(&mad_agent_priv->refcount);
2623 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2624 }
2625 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2626}
2627
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07002628static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629{
2630 struct ib_mad_port_private *port_priv = cq->cq_context;
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002631 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002633 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2634 if (!list_empty(&port_priv->port_list))
2635 queue_work(port_priv->wq, &port_priv->work);
2636 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637}
2638
2639/*
2640 * Allocate receive MADs and post receive WRs for them
2641 */
2642static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2643 struct ib_mad_private *mad)
2644{
2645 unsigned long flags;
2646 int post, ret;
2647 struct ib_mad_private *mad_priv;
2648 struct ib_sge sg_list;
2649 struct ib_recv_wr recv_wr, *bad_recv_wr;
2650 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2651
2652 /* Initialize common scatter list fields */
2653 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2654 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2655
2656 /* Initialize common receive WR fields */
2657 recv_wr.next = NULL;
2658 recv_wr.sg_list = &sg_list;
2659 recv_wr.num_sge = 1;
2660
	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    sizeof *mad_priv -
					      sizeof mad_priv->header,
					    DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs: unmap and free every buffer that is
 * still sitting on the QP's receive queue
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    sizeof(struct ib_mad_private) -
				    sizeof(struct ib_mad_private_header),
				    DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port: bring the special QPs to RTS and post the initial
 * receive work requests
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

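	/*
	 * Move each core MAD QP (QP0 if the port supports SMI, and QP1)
	 * through the INIT -> RTR -> RTS transitions so it can send and
	 * receive MADs.
	 */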
	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

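/*
 * Asynchronous event handler attached to the MAD QPs.  A fatal QP error
 * cannot be recovered from here, so it is only logged.
 */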
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

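/*
 * Create one of the special MAD QPs (SMI or GSI).  Send and receive work
 * completions share the port CQ, and the queue depths come from the
 * send_queue_size/recv_queue_size module parameters.
 */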
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the CQ, PD, MR, QPs, and workqueue needed to service it
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;
	struct ib_cq_init_attr cq_attr = {};

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

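	/*
	 * A single CQ covers the send and receive queues of both special
	 * QPs, so double its size when the port also has an SMI QP.
	 */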
	cq_attr.cqe = cq_size;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, &cq_attr);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

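/*
 * ib_client "add" callback: open MAD services on every MAD-capable port of
 * a newly registered device.  Switches expose management on port 0 only;
 * other node types use ports 1..phys_port_cnt.
 */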
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end = 0;
	} else {
		start = 1;
		end = device->phys_port_cnt;
	}

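	/* Skip ports that do not provide MAD services (rdma_cap_ib_mad). */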
	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

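/*
 * ib_client "remove" callback: tear down agent and MAD port state for
 * every MAD-capable port when a device is unregistered.
 */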
static void ib_mad_remove_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end = 0;
	} else {
		start = 1;
		end = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

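/*
 * Registering as an IB client ensures the add/remove callbacks above run
 * for every RDMA device, whether already present or hot-plugged later.
 */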
static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

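/*
 * Module init: clamp the queue-size parameters to the supported range,
 * create the slab cache used for MAD buffers, and register with the IB
 * core as a client.
 */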
static int __init ib_mad_init_module(void)
{
	int ret;

	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		pr_err("Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);