/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

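/*
 * A response MAD completes a transaction: either the method has the
 * response bit set (e.g. GetResp), the method is TrapRepress, or a BM
 * class MAD carries the response bit in attr_mod.  Callers use this to
 * decide whether a received MAD answers an outstanding request or starts
 * a new transaction.
 */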
int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/*
	 * Verify the QP requested is supported.  For example, Ethernet
	 * devices will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * does not overlap with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
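
/*
 * Example (an illustrative sketch, not part of the original file): a GSI
 * client that wants to receive unsolicited Get/Set requests of the
 * Performance Management class might register roughly as follows.  The
 * handler and context names are hypothetical, and the class version (1)
 * is the value commonly used for this class.
 *
 *	struct ib_mad_reg_req reg = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */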

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

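/*
 * Fabricate a receive work completion for an SMP that is processed
 * locally (it never goes out on the wire), so the normal receive path
 * can handle it as if it had arrived on QP0.
 */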
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		dev_err(&device->dev, "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

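/*
 * RMPP payloads are carried in fixed-size segments: each segment holds
 * sizeof(struct ib_mad) minus the class header.  For example, with a
 * 56-byte header a segment carries 200 bytes, so a 500-byte payload needs
 * three segments and the last one is padded with 100 bytes of zeros.
 */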
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
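
/*
 * Example (an illustrative sketch, not part of the original file): sending
 * a single non-RMPP MAD on an agent registered above.  "ah" is an address
 * handle the caller has already created for the destination; the names
 * are hypothetical.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	(fill in msg->mad: MAD header and payload)
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 2;
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 *
 * On success, the buffer is freed (via ib_free_send_mad) from the agent's
 * send handler once the send completes or times out.
 */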

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
1205
1206/*
1207 * ib_free_recv_mad - Returns data buffers used to receive
1208 * a MAD to the access layer
1209 */
1210void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1211{
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001212 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 struct ib_mad_private_header *mad_priv_hdr;
1214 struct ib_mad_private *priv;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001215 struct list_head free_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001217 INIT_LIST_HEAD(&free_list);
1218 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001220 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1221 &free_list, list) {
1222 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1223 recv_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 mad_priv_hdr = container_of(mad_recv_wc,
1225 struct ib_mad_private_header,
1226 recv_wc);
1227 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1228 header);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001229 kmem_cache_free(ib_mad_cache, priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231}
1232EXPORT_SYMBOL(ib_free_recv_mad);
1233
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1235 u8 rmpp_version,
1236 ib_mad_send_handler send_handler,
1237 ib_mad_recv_handler recv_handler,
1238 void *context)
1239{
1240 return ERR_PTR(-EINVAL); /* XXX: for now */
1241}
1242EXPORT_SYMBOL(ib_redirect_mad_qp);
1243
1244int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1245 struct ib_wc *wc)
1246{
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001247 dev_err(&mad_agent->device->dev,
1248 "ib_process_mad_wc() not implemented yet\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 return 0;
1250}
1251EXPORT_SYMBOL(ib_process_mad_wc);
1252
1253static int method_in_use(struct ib_mad_mgmt_method_table **method,
1254 struct ib_mad_reg_req *mad_reg_req)
1255{
1256 int i;
1257
Akinobu Mita19b629f2010-03-05 13:41:38 -08001258 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 if ((*method)->agent[i]) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001260 pr_err("Method %d already in use\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 return -EINVAL;
1262 }
1263 }
1264 return 0;
1265}
1266
1267static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1268{
1269 /* Allocate management method table */
Roland Dreierde6eb662005-11-02 07:23:14 -08001270 *method = kzalloc(sizeof **method, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 if (!*method) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001272 pr_err("No memory for ib_mad_mgmt_method_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 return -ENOMEM;
1274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
1276 return 0;
1277}
1278
1279/*
1280 * Check to see if there are any methods still in use
1281 */
1282static int check_method_table(struct ib_mad_mgmt_method_table *method)
1283{
1284 int i;
1285
1286 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1287 if (method->agent[i])
1288 return 1;
1289 return 0;
1290}
1291
1292/*
1293 * Check to see if there are any method tables for this class still in use
1294 */
1295static int check_class_table(struct ib_mad_mgmt_class_table *class)
1296{
1297 int i;
1298
1299 for (i = 0; i < MAX_MGMT_CLASS; i++)
1300 if (class->method_table[i])
1301 return 1;
1302 return 0;
1303}
1304
1305static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1306{
1307 int i;
1308
1309 for (i = 0; i < MAX_MGMT_OUI; i++)
1310 if (vendor_class->method_table[i])
1311 return 1;
1312 return 0;
1313}
1314
1315static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1316 char *oui)
1317{
1318 int i;
1319
1320 for (i = 0; i < MAX_MGMT_OUI; i++)
Roland Dreier3cd96562006-09-22 15:22:46 -07001321 /* Is there matching OUI for this vendor class ? */
1322 if (!memcmp(vendor_class->oui[i], oui, 3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 return i;
1324
1325 return -1;
1326}
1327
1328static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1329{
1330 int i;
1331
1332 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1333 if (vendor->vendor_class[i])
1334 return 1;
1335
1336 return 0;
1337}
1338
1339static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1340 struct ib_mad_agent_private *agent)
1341{
1342 int i;
1343
1344 /* Remove any methods for this mad agent */
1345 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1346 if (method->agent[i] == agent) {
1347 method->agent[i] = NULL;
1348 }
1349 }
1350}
1351
1352static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1353 struct ib_mad_agent_private *agent_priv,
1354 u8 mgmt_class)
1355{
1356 struct ib_mad_port_private *port_priv;
1357 struct ib_mad_mgmt_class_table **class;
1358 struct ib_mad_mgmt_method_table **method;
1359 int i, ret;
1360
1361 port_priv = agent_priv->qp_info->port_priv;
1362 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1363 if (!*class) {
1364 /* Allocate management class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001365 *class = kzalloc(sizeof **class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 if (!*class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001367 dev_err(&agent_priv->agent.device->dev,
1368 "No memory for ib_mad_mgmt_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 ret = -ENOMEM;
1370 goto error1;
1371 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001372
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 /* Allocate method table for this management class */
1374 method = &(*class)->method_table[mgmt_class];
1375 if ((ret = allocate_method_table(method)))
1376 goto error2;
1377 } else {
1378 method = &(*class)->method_table[mgmt_class];
1379 if (!*method) {
1380 /* Allocate method table for this management class */
1381 if ((ret = allocate_method_table(method)))
1382 goto error1;
1383 }
1384 }
1385
1386 /* Now, make sure methods are not already in use */
1387 if (method_in_use(method, mad_reg_req))
1388 goto error3;
1389
1390 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001391 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 return 0;
1395
1396error3:
1397 /* Remove any methods for this mad agent */
1398 remove_methods_mad_agent(*method, agent_priv);
1399 /* Now, check to see if there are any methods in use */
1400 if (!check_method_table(*method)) {
1401 /* If not, release management method table */
1402 kfree(*method);
1403 *method = NULL;
1404 }
1405 ret = -EINVAL;
1406 goto error1;
1407error2:
1408 kfree(*class);
1409 *class = NULL;
1410error1:
1411 return ret;
1412}
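
/*
 * Orientation note: the tables built above form a three-level lookup,
 * so dispatching an incoming MAD is just successive array indexing:
 *
 *	port_priv->version[mgmt_class_version]		per-version entry
 *		.class->method_table[mgmt_class]	per-class methods
 *			->agent[method]			registered agent or NULL
 */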
1413
1414static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1415 struct ib_mad_agent_private *agent_priv)
1416{
1417 struct ib_mad_port_private *port_priv;
1418 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1419 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1420 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1421 struct ib_mad_mgmt_method_table **method;
1422 int i, ret = -ENOMEM;
1423 u8 vclass;
1424
1425 /* "New" vendor (with OUI) class */
1426 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1427 port_priv = agent_priv->qp_info->port_priv;
1428 vendor_table = &port_priv->version[
1429 mad_reg_req->mgmt_class_version].vendor;
1430 if (!*vendor_table) {
1431 /* Allocate mgmt vendor class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001432 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 if (!vendor) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001434 dev_err(&agent_priv->agent.device->dev,
1435 "No memory for ib_mad_mgmt_vendor_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 goto error1;
1437 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001438
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 *vendor_table = vendor;
1440 }
1441 if (!(*vendor_table)->vendor_class[vclass]) {
1442 /* Allocate table for this management vendor class */
Roland Dreierde6eb662005-11-02 07:23:14 -08001443 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 if (!vendor_class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001445 dev_err(&agent_priv->agent.device->dev,
1446 "No memory for ib_mad_mgmt_vendor_class\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 goto error2;
1448 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 (*vendor_table)->vendor_class[vclass] = vendor_class;
1451 }
1452 for (i = 0; i < MAX_MGMT_OUI; i++) {
1453		/* Is there a matching OUI for this vendor class? */
1454 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1455 mad_reg_req->oui, 3)) {
1456 method = &(*vendor_table)->vendor_class[
1457 vclass]->method_table[i];
1458 BUG_ON(!*method);
1459 goto check_in_use;
1460 }
1461 }
1462 for (i = 0; i < MAX_MGMT_OUI; i++) {
1463		/* Is an OUI slot available? */
1464 if (!is_vendor_oui((*vendor_table)->vendor_class[
1465 vclass]->oui[i])) {
1466 method = &(*vendor_table)->vendor_class[
1467 vclass]->method_table[i];
1468 BUG_ON(*method);
1469 /* Allocate method table for this OUI */
1470 if ((ret = allocate_method_table(method)))
1471 goto error3;
1472 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1473 mad_reg_req->oui, 3);
1474 goto check_in_use;
1475 }
1476 }
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001477 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 goto error3;
1479
1480check_in_use:
1481 /* Now, make sure methods are not already in use */
1482 if (method_in_use(method, mad_reg_req))
1483 goto error4;
1484
1485 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001486 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001488
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 return 0;
1490
1491error4:
1492 /* Remove any methods for this mad agent */
1493 remove_methods_mad_agent(*method, agent_priv);
1494 /* Now, check to see if there are any methods in use */
1495 if (!check_method_table(*method)) {
1496 /* If not, release management method table */
1497 kfree(*method);
1498 *method = NULL;
1499 }
1500 ret = -EINVAL;
1501error3:
1502 if (vendor_class) {
1503 (*vendor_table)->vendor_class[vclass] = NULL;
1504 kfree(vendor_class);
1505 }
1506error2:
1507 if (vendor) {
1508 *vendor_table = NULL;
1509 kfree(vendor);
1510 }
1511error1:
1512 return ret;
1513}
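
/*
 * Vendor classes in range 2 (0x30-0x4f) add one more level: up to
 * MAX_MGMT_OUI per-OUI method tables per class. A hypothetical
 * registration for such a class also supplies the 3-byte OUI:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = 0x30,
 *		.mgmt_class_version = 1,
 *		.oui = { 0x00, 0x02, 0xc9 },	(illustrative OUI only)
 *	};
 */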
1514
1515static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1516{
1517 struct ib_mad_port_private *port_priv;
1518 struct ib_mad_mgmt_class_table *class;
1519 struct ib_mad_mgmt_method_table *method;
1520 struct ib_mad_mgmt_vendor_class_table *vendor;
1521 struct ib_mad_mgmt_vendor_class *vendor_class;
1522 int index;
1523 u8 mgmt_class;
1524
1525 /*
1526	 * Was a MAD registration request supplied
1527	 * with the original registration?
1528 */
1529	if (!agent_priv->reg_req)
1530		goto out;
1532
1533 port_priv = agent_priv->qp_info->port_priv;
1534 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1535 class = port_priv->version[
1536 agent_priv->reg_req->mgmt_class_version].class;
1537 if (!class)
1538 goto vendor_check;
1539
1540 method = class->method_table[mgmt_class];
1541 if (method) {
1542 /* Remove any methods for this mad agent */
1543 remove_methods_mad_agent(method, agent_priv);
1544 /* Now, check to see if there are any methods still in use */
1545 if (!check_method_table(method)) {
1546 /* If not, release management method table */
1547 kfree(method);
1548 class->method_table[mgmt_class] = NULL;
1549			/* Any management classes left? */
1550 if (!check_class_table(class)) {
1551 /* If not, release management class table */
1552 kfree(class);
1553 port_priv->version[
1554 agent_priv->reg_req->
1555 mgmt_class_version].class = NULL;
1556 }
1557 }
1558 }
1559
1560vendor_check:
1561 if (!is_vendor_class(mgmt_class))
1562 goto out;
1563
1564 /* normalize mgmt_class to vendor range 2 */
1565 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1566 vendor = port_priv->version[
1567 agent_priv->reg_req->mgmt_class_version].vendor;
1568
1569 if (!vendor)
1570 goto out;
1571
1572 vendor_class = vendor->vendor_class[mgmt_class];
1573 if (vendor_class) {
1574 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1575 if (index < 0)
1576 goto out;
1577 method = vendor_class->method_table[index];
1578 if (method) {
1579 /* Remove any methods for this mad agent */
1580 remove_methods_mad_agent(method, agent_priv);
1581 /*
1582 * Now, check to see if there are
1583 * any methods still in use
1584 */
1585 if (!check_method_table(method)) {
1586 /* If not, release management method table */
1587 kfree(method);
1588 vendor_class->method_table[index] = NULL;
1589 memset(vendor_class->oui[index], 0, 3);
1590				/* Any OUIs left? */
1591 if (!check_vendor_class(vendor_class)) {
1592 /* If not, release vendor class table */
1593 kfree(vendor_class);
1594 vendor->vendor_class[mgmt_class] = NULL;
1595					/* Any other vendor classes left? */
1596 if (!check_vendor_table(vendor)) {
1597 kfree(vendor);
1598 port_priv->version[
1599 agent_priv->reg_req->
1600 mgmt_class_version].
1601 vendor = NULL;
1602 }
1603 }
1604 }
1605 }
1606 }
1607
1608out:
1609 return;
1610}
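
/*
 * Teardown mirrors registration: drop the agent's methods, free the
 * method table once its last agent is gone, then the class table (or
 * the OUI slot, vendor class, and vendor table), so empty tables never
 * outlive their last registrant.
 */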
1611
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612static struct ib_mad_agent_private *
1613find_mad_agent(struct ib_mad_port_private *port_priv,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001614 struct ib_mad *mad)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615{
1616 struct ib_mad_agent_private *mad_agent = NULL;
1617 unsigned long flags;
1618
1619 spin_lock_irqsave(&port_priv->reg_lock, flags);
Sean Hefty2527e682006-07-20 11:25:50 +03001620 if (ib_response_mad(mad)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 u32 hi_tid;
1622 struct ib_mad_agent_private *entry;
1623
1624 /*
1625		 * Routing is based on the high 32 bits of the
1626		 * MAD's transaction ID.
1627 */
1628 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
Sean Hefty34816ad2005-10-25 10:51:39 -07001629 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 if (entry->agent.hi_tid == hi_tid) {
1631 mad_agent = entry;
1632 break;
1633 }
1634 }
1635 } else {
1636 struct ib_mad_mgmt_class_table *class;
1637 struct ib_mad_mgmt_method_table *method;
1638 struct ib_mad_mgmt_vendor_class_table *vendor;
1639 struct ib_mad_mgmt_vendor_class *vendor_class;
1640 struct ib_vendor_mad *vendor_mad;
1641 int index;
1642
1643 /*
1644		 * Routing is based on version, class, and method.
1645		 * For "newer" vendor MADs, it is also based on OUI.
1646 */
1647 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1648 goto out;
1649 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1650 class = port_priv->version[
1651 mad->mad_hdr.class_version].class;
1652 if (!class)
1653 goto out;
Hefty, Seanb7ab0b12011-10-06 09:33:05 -07001654 if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1655 IB_MGMT_MAX_METHODS)
1656 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 method = class->method_table[convert_mgmt_class(
1658 mad->mad_hdr.mgmt_class)];
1659 if (method)
1660 mad_agent = method->agent[mad->mad_hdr.method &
1661 ~IB_MGMT_METHOD_RESP];
1662 } else {
1663 vendor = port_priv->version[
1664 mad->mad_hdr.class_version].vendor;
1665 if (!vendor)
1666 goto out;
1667 vendor_class = vendor->vendor_class[vendor_class_index(
1668 mad->mad_hdr.mgmt_class)];
1669 if (!vendor_class)
1670 goto out;
1671 /* Find matching OUI */
1672 vendor_mad = (struct ib_vendor_mad *)mad;
1673 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1674 if (index == -1)
1675 goto out;
1676 method = vendor_class->method_table[index];
1677 if (method) {
1678 mad_agent = method->agent[mad->mad_hdr.method &
1679 ~IB_MGMT_METHOD_RESP];
1680 }
1681 }
1682 }
1683
1684 if (mad_agent) {
1685 if (mad_agent->agent.recv_handler)
1686 atomic_inc(&mad_agent->refcount);
1687 else {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001688 dev_notice(&port_priv->device->dev,
1689 "No receive handler for client %p on port %d\n",
1690 &mad_agent->agent, port_priv->port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 mad_agent = NULL;
1692 }
1693 }
1694out:
1695 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1696
1697 return mad_agent;
1698}
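
/*
 * Response routing above relies on the TID convention: the agent's
 * hi_tid occupies the upper 32 bits and the client chooses the rest,
 * roughly (illustrative only):
 *
 *	mad_hdr->tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | my_id);
 */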
1699
1700static int validate_mad(struct ib_mad *mad, u32 qp_num)
1701{
1702 int valid = 0;
1703
1704 /* Make sure MAD base version is understood */
1705 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001706 pr_err("MAD received with unsupported base version %d\n",
1707 mad->mad_hdr.base_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 goto out;
1709 }
1710
1711	/* Filter SMI packets sent to a QP other than QP0 */
1712 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1713 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1714 if (qp_num == 0)
1715 valid = 1;
1716 } else {
1717 /* Filter GSI packets sent to QP0 */
1718 if (qp_num != 0)
1719 valid = 1;
1720 }
1721
1722out:
1723 return valid;
1724}
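
/*
 * In other words (summary, not new policy): the SMI classes 0x01
 * (LID routed) and 0x81 (directed route) are accepted only on QP0,
 * while every other (GSI) class is accepted only on QP1.
 */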
1725
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001726static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1727 struct ib_mad_hdr *mad_hdr)
1728{
1729 struct ib_rmpp_mad *rmpp_mad;
1730
1731 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1732 return !mad_agent_priv->agent.rmpp_version ||
1733 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1734 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1735 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1736}
1737
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001738static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1739 struct ib_mad_recv_wc *rwc)
1740{
1741 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1742 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1743}
1744
Jack Morgenstein9874e742006-06-17 20:37:34 -07001745static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1746 struct ib_mad_send_wr_private *wr,
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001747				     struct ib_mad_recv_wc *rwc)
1748{
1749 struct ib_ah_attr attr;
1750 u8 send_resp, rcv_resp;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001751 union ib_gid sgid;
1752 struct ib_device *device = mad_agent_priv->agent.device;
1753 u8 port_num = mad_agent_priv->agent.port_num;
1754 u8 lmc;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001755
Michael Brooks70972282008-09-20 20:06:16 -07001756 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1757 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001758
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001759 if (send_resp == rcv_resp)
1760		/* both requests or both responses: treat GIDs as different */
1761 return 0;
1762
1763 if (ib_query_ah(wr->send_buf.ah, &attr))
1764 /* Assume not equal, to avoid false positives. */
1765 return 0;
1766
Jack Morgenstein9874e742006-06-17 20:37:34 -07001767 if (!!(attr.ah_flags & IB_AH_GRH) !=
1768 !!(rwc->wc->wc_flags & IB_WC_GRH))
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001769 /* one has GID, other does not. Assume different */
1770 return 0;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001771
1772 if (!send_resp && rcv_resp) {
1773		/* this is a request/response pair */
1774 if (!(attr.ah_flags & IB_AH_GRH)) {
1775 if (ib_get_cached_lmc(device, port_num, &lmc))
1776 return 0;
1777 return (!lmc || !((attr.src_path_bits ^
1778 rwc->wc->dlid_path_bits) &
1779 ((1 << lmc) - 1)));
1780 } else {
1781 if (ib_get_cached_gid(device, port_num,
1782 attr.grh.sgid_index, &sgid))
1783 return 0;
1784 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1785 16);
1786 }
1787 }
1788
1789 if (!(attr.ah_flags & IB_AH_GRH))
1790 return attr.dlid == rwc->wc->slid;
1791 else
1792 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1793 16);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001794}
Jack Morgenstein9874e742006-06-17 20:37:34 -07001795
1796static inline int is_direct(u8 class)
1797{
1798 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1799}
1800
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001801struct ib_mad_send_wr_private*
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001802ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
Jack Morgenstein9874e742006-06-17 20:37:34 -07001803 struct ib_mad_recv_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804{
Jack Morgenstein9874e742006-06-17 20:37:34 -07001805 struct ib_mad_send_wr_private *wr;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001806 struct ib_mad *mad;
1807
Jack Morgenstein9874e742006-06-17 20:37:34 -07001808 mad = (struct ib_mad *)wc->recv_buf.mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Jack Morgenstein9874e742006-06-17 20:37:34 -07001810 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1811 if ((wr->tid == mad->mad_hdr.tid) &&
1812 rcv_has_same_class(wr, wc) &&
1813 /*
1814 * Don't check GID for direct routed MADs.
1815 * These might have permissive LIDs.
1816 */
1817 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1818 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Roland Dreier39798692006-11-13 09:38:07 -08001819 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 }
1821
1822 /*
1823 * It's possible to receive the response before we've
1824 * been notified that the send has completed
1825 */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001826 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1827 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1828 wr->tid == mad->mad_hdr.tid &&
1829 wr->timeout &&
1830 rcv_has_same_class(wr, wc) &&
1831 /*
1832 * Don't check GID for direct routed MADs.
1833 * These might have permissive LIDs.
1834 */
1835 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1836 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 /* Verify request has not been canceled */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001838 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 }
1840 return NULL;
1841}
1842
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001843void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001844{
1845 mad_send_wr->timeout = 0;
Akinobu Mita179e0912006-06-26 00:24:41 -07001846 if (mad_send_wr->refcount == 1)
1847 list_move_tail(&mad_send_wr->agent_list,
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001848 &mad_send_wr->mad_agent_priv->done_list);
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001849}
1850
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001852 struct ib_mad_recv_wc *mad_recv_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853{
1854 struct ib_mad_send_wr_private *mad_send_wr;
1855 struct ib_mad_send_wc mad_send_wc;
1856 unsigned long flags;
1857
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001858 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1859 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1860 if (mad_agent_priv->agent.rmpp_version) {
1861 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1862 mad_recv_wc);
1863 if (!mad_recv_wc) {
Sean Hefty1b52fa982006-05-12 14:57:52 -07001864 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001865 return;
1866 }
1867 }
1868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 /* Complete corresponding request */
Sean Hefty2527e682006-07-20 11:25:50 +03001870 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001872 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 if (!mad_send_wr) {
1874 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001875 ib_free_recv_mad(mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -07001876 deref_mad_agent(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 return;
1878 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001879 ib_mark_mad_done(mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1881
1882 /* Defined behavior is to complete response before request */
Sean Hefty34816ad2005-10-25 10:51:39 -07001883 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001884 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1885 mad_recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 atomic_dec(&mad_agent_priv->refcount);
1887
1888 mad_send_wc.status = IB_WC_SUCCESS;
1889 mad_send_wc.vendor_err = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07001890 mad_send_wc.send_buf = &mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1892 } else {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001893 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1894 mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -07001895 deref_mad_agent(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 }
1897}
1898
Swapna Thete0b307042012-02-25 17:47:32 -08001899static bool generate_unmatched_resp(struct ib_mad_private *recv,
1900 struct ib_mad_private *response)
1901{
1902 if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1903 recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1904 memcpy(response, recv, sizeof *response);
1905 response->header.recv_wc.wc = &response->header.wc;
1906 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1907 response->header.recv_wc.recv_buf.grh = &response->grh;
1908 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1909 response->mad.mad.mad_hdr.status =
1910 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
Jack Morgenstein840777d2012-04-24 16:06:50 -07001911 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1912 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
Swapna Thete0b307042012-02-25 17:47:32 -08001913
1914 return true;
1915 } else {
1916 return false;
1917 }
1918}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001919static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1920 struct ib_wc *wc)
1921{
1922 struct ib_mad_qp_info *qp_info;
1923 struct ib_mad_private_header *mad_priv_hdr;
Hal Rosenstock445d6802007-08-03 10:45:17 -07001924 struct ib_mad_private *recv, *response = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 struct ib_mad_list_head *mad_list;
1926 struct ib_mad_agent_private *mad_agent;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001927 int port_num;
Jack Morgensteina9e74322012-04-24 16:08:57 -07001928 int ret = IB_MAD_RESULT_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1931 qp_info = mad_list->mad_queue->qp_info;
1932 dequeue_mad(mad_list);
1933
1934 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1935 mad_list);
1936 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
Ralph Campbell15271062006-12-12 14:28:30 -08001937 ib_dma_unmap_single(port_priv->device,
1938 recv->header.mapping,
1939 sizeof(struct ib_mad_private) -
1940 sizeof(struct ib_mad_private_header),
1941 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
1943 /* Setup MAD receive work completion from "normal" work completion */
Sean Hefty24239af2005-04-16 15:26:08 -07001944 recv->header.wc = *wc;
1945 recv->header.recv_wc.wc = &recv->header.wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1947 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1948 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1949
1950 if (atomic_read(&qp_info->snoop_count))
1951 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1952
1953 /* Validate MAD */
1954 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1955 goto out;
1956
Hal Rosenstock445d6802007-08-03 10:45:17 -07001957 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1958 if (!response) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001959 dev_err(&port_priv->device->dev,
1960			"ib_mad_recv_done_handler: no memory for response buffer\n");
Hal Rosenstock445d6802007-08-03 10:45:17 -07001961 goto out;
1962 }
1963
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001964 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1965 port_num = wc->port_num;
1966 else
1967 port_num = port_priv->port_num;
1968
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 if (recv->mad.mad.mad_hdr.mgmt_class ==
1970 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001971 enum smi_forward_action retsmi;
1972
Hal Rosenstockde493d42007-04-02 11:24:07 -04001973 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1974 port_priv->device->node_type,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001975 port_num,
Hal Rosenstockde493d42007-04-02 11:24:07 -04001976 port_priv->device->phys_port_cnt) ==
1977 IB_SMI_DISCARD)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 goto out;
Hal Rosenstockde493d42007-04-02 11:24:07 -04001979
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001980 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1981 if (retsmi == IB_SMI_LOCAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 goto local;
Hal Rosenstockde493d42007-04-02 11:24:07 -04001983
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001984 if (retsmi == IB_SMI_SEND) { /* don't forward */
1985 if (smi_handle_dr_smp_send(&recv->mad.smp,
1986 port_priv->device->node_type,
1987 port_num) == IB_SMI_DISCARD)
1988 goto out;
Hal Rosenstockde493d42007-04-02 11:24:07 -04001989
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04001990 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1991 goto out;
1992 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1993 /* forward case for switches */
1994 memcpy(response, recv, sizeof(*response));
1995 response->header.recv_wc.wc = &response->header.wc;
1996 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1997 response->header.recv_wc.recv_buf.grh = &response->grh;
1998
Hal Rosenstock86dfbec2007-08-03 10:45:17 -07001999 agent_send_response(&response->mad.mad,
2000 &response->grh, wc,
2001 port_priv->device,
2002 smi_get_fwd_port(&recv->mad.smp),
2003 qp_info->qp->qp_num);
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002004
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 goto out;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002006 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 }
2008
2009local:
2010 /* Give driver "right of first refusal" on incoming MAD */
2011 if (port_priv->device->process_mad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 ret = port_priv->device->process_mad(port_priv->device, 0,
2013 port_priv->port_num,
2014 wc, &recv->grh,
2015 &recv->mad.mad,
2016 &response->mad.mad);
2017 if (ret & IB_MAD_RESULT_SUCCESS) {
2018 if (ret & IB_MAD_RESULT_CONSUMED)
2019 goto out;
2020 if (ret & IB_MAD_RESULT_REPLY) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002021 agent_send_response(&response->mad.mad,
2022 &recv->grh, wc,
2023 port_priv->device,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002024 port_num,
Sean Hefty34816ad2005-10-25 10:51:39 -07002025 qp_info->qp->qp_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 goto out;
2027 }
2028 }
2029 }
2030
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002031 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 if (mad_agent) {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002033 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 /*
2035		 * recv is freed in error cases by ib_mad_complete_recv()
2036		 * or handed to the client via its recv_handler
2037 */
2038 recv = NULL;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002039 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2040 generate_unmatched_resp(recv, response)) {
Swapna Thete0b307042012-02-25 17:47:32 -08002041 agent_send_response(&response->mad.mad, &recv->grh, wc,
2042 port_priv->device, port_num, qp_info->qp->qp_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 }
2044
2045out:
2046 /* Post another receive request for this QP */
2047 if (response) {
2048 ib_mad_post_receive_mads(qp_info, response);
2049 if (recv)
2050 kmem_cache_free(ib_mad_cache, recv);
2051 } else
2052 ib_mad_post_receive_mads(qp_info, recv);
2053}
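
/*
 * Receive path summary (orientation only): unmap the buffer, validate
 * it, run the SMP relay checks for directed-route MADs, give the
 * driver first refusal via process_mad(), then dispatch to a matching
 * agent or synthesize an error reply for unmatched Get/Set requests.
 */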
2054
2055static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2056{
2057 struct ib_mad_send_wr_private *mad_send_wr;
2058 unsigned long delay;
2059
2060 if (list_empty(&mad_agent_priv->wait_list)) {
Tejun Heo136b5722012-08-21 13:18:24 -07002061 cancel_delayed_work(&mad_agent_priv->timed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 } else {
2063 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2064 struct ib_mad_send_wr_private,
2065 agent_list);
2066
2067 if (time_after(mad_agent_priv->timeout,
2068 mad_send_wr->timeout)) {
2069 mad_agent_priv->timeout = mad_send_wr->timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 delay = mad_send_wr->timeout - jiffies;
2071 if ((long)delay <= 0)
2072 delay = 1;
Tejun Heoe7c2f962012-08-21 13:18:24 -07002073 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2074 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 }
2076 }
2077}
2078
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002079static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080{
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002081 struct ib_mad_agent_private *mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 struct ib_mad_send_wr_private *temp_mad_send_wr;
2083 struct list_head *list_item;
2084 unsigned long delay;
2085
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002086 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 list_del(&mad_send_wr->agent_list);
2088
2089 delay = mad_send_wr->timeout;
2090 mad_send_wr->timeout += jiffies;
2091
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002092 if (delay) {
2093 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2094 temp_mad_send_wr = list_entry(list_item,
2095 struct ib_mad_send_wr_private,
2096 agent_list);
2097 if (time_after(mad_send_wr->timeout,
2098 temp_mad_send_wr->timeout))
2099 break;
2100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101	} else
2103 list_item = &mad_agent_priv->wait_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 list_add(&mad_send_wr->agent_list, list_item);
2105
2106 /* Reschedule a work item if we have a shorter timeout */
Tejun Heoe7c2f962012-08-21 13:18:24 -07002107 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2108 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2109 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110}
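
/*
 * The wait list is kept sorted by absolute timeout, so its head is
 * always the earliest deadline; timed_work needs rescheduling only
 * when a new entry becomes that head.
 */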
2111
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002112void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2113 int timeout_ms)
2114{
2115 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2116 wait_for_response(mad_send_wr);
2117}
2118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119/*
2120 * Process a send work completion
2121 */
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002122void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2123 struct ib_mad_send_wc *mad_send_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124{
2125 struct ib_mad_agent_private *mad_agent_priv;
2126 unsigned long flags;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002127 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002129 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002131 if (mad_agent_priv->agent.rmpp_version) {
2132 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2133 if (ret == IB_RMPP_RESULT_CONSUMED)
2134 goto done;
2135 } else
2136 ret = IB_RMPP_RESULT_UNHANDLED;
2137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 if (mad_send_wc->status != IB_WC_SUCCESS &&
2139 mad_send_wr->status == IB_WC_SUCCESS) {
2140 mad_send_wr->status = mad_send_wc->status;
2141 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2142 }
2143
2144 if (--mad_send_wr->refcount > 0) {
2145 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2146 mad_send_wr->status == IB_WC_SUCCESS) {
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002147 wait_for_response(mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002149 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 }
2151
2152 /* Remove send from MAD agent and notify client of completion */
2153 list_del(&mad_send_wr->agent_list);
2154 adjust_timeout(mad_agent_priv);
2155 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2156
2157	if (mad_send_wr->status != IB_WC_SUCCESS)
2158 mad_send_wc->status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002159 if (ret == IB_RMPP_RESULT_INTERNAL)
2160 ib_rmpp_send_handler(mad_send_wc);
2161 else
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002162 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2163 mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
2165 /* Release reference on agent taken when sending */
Sean Hefty1b52fa982006-05-12 14:57:52 -07002166 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002167 return;
2168done:
2169 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170}
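
/*
 * Reference counting note (summary of the logic above): a send holds
 * one reference for its hardware completion plus one while a response
 * may still arrive, so the client sees the request complete only after
 * both the send completion and the response/timeout have resolved.
 */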
2171
2172static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2173 struct ib_wc *wc)
2174{
2175 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2176 struct ib_mad_list_head *mad_list;
2177 struct ib_mad_qp_info *qp_info;
2178 struct ib_mad_queue *send_queue;
2179 struct ib_send_wr *bad_send_wr;
Sean Hefty34816ad2005-10-25 10:51:39 -07002180 struct ib_mad_send_wc mad_send_wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 unsigned long flags;
2182 int ret;
2183
2184 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2185 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2186 mad_list);
2187 send_queue = mad_list->mad_queue;
2188 qp_info = send_queue->qp_info;
2189
2190retry:
Ralph Campbell15271062006-12-12 14:28:30 -08002191 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2192 mad_send_wr->header_mapping,
2193 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2194 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2195 mad_send_wr->payload_mapping,
2196 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 queued_send_wr = NULL;
2198 spin_lock_irqsave(&send_queue->lock, flags);
2199 list_del(&mad_list->list);
2200
2201 /* Move queued send to the send queue */
2202 if (send_queue->count-- > send_queue->max_active) {
2203 mad_list = container_of(qp_info->overflow_list.next,
2204 struct ib_mad_list_head, list);
2205 queued_send_wr = container_of(mad_list,
2206 struct ib_mad_send_wr_private,
2207 mad_list);
Akinobu Mita179e0912006-06-26 00:24:41 -07002208 list_move_tail(&mad_list->list, &send_queue->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 }
2210 spin_unlock_irqrestore(&send_queue->lock, flags);
2211
Sean Hefty34816ad2005-10-25 10:51:39 -07002212 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2213 mad_send_wc.status = wc->status;
2214 mad_send_wc.vendor_err = wc->vendor_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 if (atomic_read(&qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002216 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 IB_MAD_SNOOP_SEND_COMPLETIONS);
Sean Hefty34816ad2005-10-25 10:51:39 -07002218 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
2220 if (queued_send_wr) {
2221 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
Sean Hefty34816ad2005-10-25 10:51:39 -07002222 &bad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002224 dev_err(&port_priv->device->dev,
2225 "ib_post_send failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 mad_send_wr = queued_send_wr;
2227 wc->status = IB_WC_LOC_QP_OP_ERR;
2228 goto retry;
2229 }
2230 }
2231}
2232
2233static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2234{
2235 struct ib_mad_send_wr_private *mad_send_wr;
2236 struct ib_mad_list_head *mad_list;
2237 unsigned long flags;
2238
2239 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2240 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2241 mad_send_wr = container_of(mad_list,
2242 struct ib_mad_send_wr_private,
2243 mad_list);
2244 mad_send_wr->retry = 1;
2245 }
2246 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2247}
2248
2249static void mad_error_handler(struct ib_mad_port_private *port_priv,
2250 struct ib_wc *wc)
2251{
2252 struct ib_mad_list_head *mad_list;
2253 struct ib_mad_qp_info *qp_info;
2254 struct ib_mad_send_wr_private *mad_send_wr;
2255 int ret;
2256
2257 /* Determine if failure was a send or receive */
2258 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2259 qp_info = mad_list->mad_queue->qp_info;
2260 if (mad_list->mad_queue == &qp_info->recv_queue)
2261 /*
2262 * Receive errors indicate that the QP has entered the error
2263 * state - error handling/shutdown code will cleanup
2264 */
2265 return;
2266
2267 /*
2268 * Send errors will transition the QP to SQE - move
2269 * QP to RTS and repost flushed work requests
2270 */
2271 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2272 mad_list);
2273 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2274 if (mad_send_wr->retry) {
2275 /* Repost send */
2276 struct ib_send_wr *bad_send_wr;
2277
2278 mad_send_wr->retry = 0;
2279 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2280 &bad_send_wr);
2281 if (ret)
2282 ib_mad_send_done_handler(port_priv, wc);
2283 } else
2284 ib_mad_send_done_handler(port_priv, wc);
2285 } else {
2286 struct ib_qp_attr *attr;
2287
2288 /* Transition QP to RTS and fail offending send */
2289 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2290 if (attr) {
2291 attr->qp_state = IB_QPS_RTS;
2292 attr->cur_qp_state = IB_QPS_SQE;
2293 ret = ib_modify_qp(qp_info->qp, attr,
2294 IB_QP_STATE | IB_QP_CUR_STATE);
2295 kfree(attr);
2296 if (ret)
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002297 dev_err(&port_priv->device->dev,
2298 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2299 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 else
2301 mark_sends_for_retry(qp_info);
2302 }
2303 ib_mad_send_done_handler(port_priv, wc);
2304 }
2305}
2306
2307/*
2308 * IB MAD completion callback
2309 */
David Howellsc4028952006-11-22 14:57:56 +00002310static void ib_mad_completion_handler(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
2312 struct ib_mad_port_private *port_priv;
2313 struct ib_wc wc;
2314
David Howellsc4028952006-11-22 14:57:56 +00002315 port_priv = container_of(work, struct ib_mad_port_private, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2317
2318 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2319 if (wc.status == IB_WC_SUCCESS) {
2320 switch (wc.opcode) {
2321 case IB_WC_SEND:
2322 ib_mad_send_done_handler(port_priv, &wc);
2323 break;
2324 case IB_WC_RECV:
2325 ib_mad_recv_done_handler(port_priv, &wc);
2326 break;
2327 default:
2328 BUG_ON(1);
2329 break;
2330 }
2331 } else
2332 mad_error_handler(port_priv, &wc);
2333 }
2334}
2335
2336static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2337{
2338 unsigned long flags;
2339 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2340 struct ib_mad_send_wc mad_send_wc;
2341 struct list_head cancel_list;
2342
2343 INIT_LIST_HEAD(&cancel_list);
2344
2345 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2346 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2347 &mad_agent_priv->send_list, agent_list) {
2348 if (mad_send_wr->status == IB_WC_SUCCESS) {
Roland Dreier3cd96562006-09-22 15:22:46 -07002349 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2351 }
2352 }
2353
2354 /* Empty wait list to prevent receives from finding a request */
2355 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2356 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2357
2358 /* Report all cancelled requests */
2359 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2360 mad_send_wc.vendor_err = 0;
2361
2362 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2363 &cancel_list, agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002364 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2365 list_del(&mad_send_wr->agent_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2367 &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 atomic_dec(&mad_agent_priv->refcount);
2369 }
2370}
2371
2372static struct ib_mad_send_wr_private*
Sean Hefty34816ad2005-10-25 10:51:39 -07002373find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2374 struct ib_mad_send_buf *send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375{
2376 struct ib_mad_send_wr_private *mad_send_wr;
2377
2378 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2379 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002380 if (&mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 return mad_send_wr;
2382 }
2383
2384 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2385 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002386 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2387 &mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 return mad_send_wr;
2389 }
2390 return NULL;
2391}
2392
Sean Hefty34816ad2005-10-25 10:51:39 -07002393int ib_modify_mad(struct ib_mad_agent *mad_agent,
2394 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395{
2396 struct ib_mad_agent_private *mad_agent_priv;
2397 struct ib_mad_send_wr_private *mad_send_wr;
2398 unsigned long flags;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002399 int active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
2401 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2402 agent);
2403 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Sean Hefty34816ad2005-10-25 10:51:39 -07002404 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002405 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002407 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 }
2409
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002410 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002411 if (!timeout_ms) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002413 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 }
2415
Sean Hefty34816ad2005-10-25 10:51:39 -07002416 mad_send_wr->send_buf.timeout_ms = timeout_ms;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002417 if (active)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002418 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2419 else
2420 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002422 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2423 return 0;
2424}
2425EXPORT_SYMBOL(ib_modify_mad);
2426
Sean Hefty34816ad2005-10-25 10:51:39 -07002427void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2428 struct ib_mad_send_buf *send_buf)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002429{
Sean Hefty34816ad2005-10-25 10:51:39 -07002430 ib_modify_mad(mad_agent, send_buf, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431}
2432EXPORT_SYMBOL(ib_cancel_mad);
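
/*
 * Usage sketch (hypothetical client code): after ib_post_send_mad(),
 * a client can rearm the timeout of an outstanding request,
 *
 *	err = ib_modify_mad(agent, msg, 2000);	(2 second timeout;
 *						 -EINVAL if already done)
 *
 * or abort it, in which case the send completes with
 * IB_WC_WR_FLUSH_ERR:
 *
 *	ib_cancel_mad(agent, msg);
 */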
2433
David Howellsc4028952006-11-22 14:57:56 +00002434static void local_completions(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435{
2436 struct ib_mad_agent_private *mad_agent_priv;
2437 struct ib_mad_local_private *local;
2438 struct ib_mad_agent_private *recv_mad_agent;
2439 unsigned long flags;
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002440 int free_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 struct ib_wc wc;
2442 struct ib_mad_send_wc mad_send_wc;
2443
David Howellsc4028952006-11-22 14:57:56 +00002444 mad_agent_priv =
2445 container_of(work, struct ib_mad_agent_private, local_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446
2447 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2448 while (!list_empty(&mad_agent_priv->local_list)) {
2449 local = list_entry(mad_agent_priv->local_list.next,
2450 struct ib_mad_local_private,
2451 completion_list);
Michael S. Tsirkin37289ef2006-03-30 15:52:54 +02002452 list_del(&local->completion_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002454 free_mad = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 if (local->mad_priv) {
2456 recv_mad_agent = local->recv_mad_agent;
2457 if (!recv_mad_agent) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002458 dev_err(&mad_agent_priv->agent.device->dev,
2459 "No receive MAD agent for local completion\n");
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002460 free_mad = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 goto local_send_completion;
2462 }
2463
2464 /*
2465 * Defined behavior is to complete response
2466 * before request
2467 */
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +02002468 build_smp_wc(recv_mad_agent->agent.qp,
2469 (unsigned long) local->mad_send_wr,
Sean Hefty97f52eb2005-08-13 21:05:57 -07002470 be16_to_cpu(IB_LID_PERMISSIVE),
Sean Hefty34816ad2005-10-25 10:51:39 -07002471 0, recv_mad_agent->agent.port_num, &wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
2473 local->mad_priv->header.recv_wc.wc = &wc;
2474 local->mad_priv->header.recv_wc.mad_len =
2475 sizeof(struct ib_mad);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002476 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2477 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2478 &local->mad_priv->header.recv_wc.rmpp_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2480 local->mad_priv->header.recv_wc.recv_buf.mad =
2481 &local->mad_priv->mad.mad;
2482 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2483 snoop_recv(recv_mad_agent->qp_info,
2484 &local->mad_priv->header.recv_wc,
2485 IB_MAD_SNOOP_RECVS);
2486 recv_mad_agent->agent.recv_handler(
2487 &recv_mad_agent->agent,
2488 &local->mad_priv->header.recv_wc);
2489 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2490 atomic_dec(&recv_mad_agent->refcount);
2491 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2492 }
2493
2494local_send_completion:
2495 /* Complete send */
2496 mad_send_wc.status = IB_WC_SUCCESS;
2497 mad_send_wc.vendor_err = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07002498 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002500 snoop_send(mad_agent_priv->qp_info,
2501 &local->mad_send_wr->send_buf,
2502 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2504 &mad_send_wc);
2505
2506 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 atomic_dec(&mad_agent_priv->refcount);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002508 if (free_mad)
Hal Rosenstock2c153b92005-07-27 11:45:31 -07002509 kmem_cache_free(ib_mad_cache, local->mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 kfree(local);
2511 }
2512 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2513}
2514
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002515static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2516{
2517 int ret;
2518
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002519 if (!mad_send_wr->retries_left)
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002520 return -ETIMEDOUT;
2521
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002522 mad_send_wr->retries_left--;
2523 mad_send_wr->send_buf.retries++;
2524
Sean Hefty34816ad2005-10-25 10:51:39 -07002525 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002526
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002527 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2528 ret = ib_retry_rmpp(mad_send_wr);
2529 switch (ret) {
2530 case IB_RMPP_RESULT_UNHANDLED:
2531 ret = ib_send_mad(mad_send_wr);
2532 break;
2533 case IB_RMPP_RESULT_CONSUMED:
2534 ret = 0;
2535 break;
2536 default:
2537 ret = -ECOMM;
2538 break;
2539 }
2540 } else
2541 ret = ib_send_mad(mad_send_wr);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002542
2543 if (!ret) {
2544 mad_send_wr->refcount++;
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002545 list_add_tail(&mad_send_wr->agent_list,
2546 &mad_send_wr->mad_agent_priv->send_list);
2547 }
2548 return ret;
2549}
2550
David Howellsc4028952006-11-22 14:57:56 +00002551static void timeout_sends(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552{
2553 struct ib_mad_agent_private *mad_agent_priv;
2554 struct ib_mad_send_wr_private *mad_send_wr;
2555 struct ib_mad_send_wc mad_send_wc;
2556 unsigned long flags, delay;
2557
David Howellsc4028952006-11-22 14:57:56 +00002558 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2559 timed_work.work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 mad_send_wc.vendor_err = 0;
2561
2562 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2563 while (!list_empty(&mad_agent_priv->wait_list)) {
2564 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2565 struct ib_mad_send_wr_private,
2566 agent_list);
2567
2568 if (time_after(mad_send_wr->timeout, jiffies)) {
2569 delay = mad_send_wr->timeout - jiffies;
2570 if ((long)delay <= 0)
2571 delay = 1;
2572 queue_delayed_work(mad_agent_priv->qp_info->
2573 port_priv->wq,
2574 &mad_agent_priv->timed_work, delay);
2575 break;
2576 }
2577
Hal Rosenstockdbf92272005-07-27 11:45:30 -07002578 list_del(&mad_send_wr->agent_list);
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002579 if (mad_send_wr->status == IB_WC_SUCCESS &&
2580 !retry_send(mad_send_wr))
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002581 continue;
2582
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2584
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002585 if (mad_send_wr->status == IB_WC_SUCCESS)
2586 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2587 else
2588 mad_send_wc.status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002589 mad_send_wc.send_buf = &mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2591 &mad_send_wc);
2592
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 atomic_dec(&mad_agent_priv->refcount);
2594 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2595 }
2596 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2597}
2598
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07002599static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600{
2601 struct ib_mad_port_private *port_priv = cq->cq_context;
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002602 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002604 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2605 if (!list_empty(&port_priv->port_list))
2606 queue_work(port_priv->wq, &port_priv->work);
2607 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608}
2609
2610/*
2611 * Allocate receive MADs and post receive WRs for them
2612 */
2613static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2614 struct ib_mad_private *mad)
2615{
2616 unsigned long flags;
2617 int post, ret;
2618 struct ib_mad_private *mad_priv;
2619 struct ib_sge sg_list;
2620 struct ib_recv_wr recv_wr, *bad_recv_wr;
2621 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2622
2623 /* Initialize common scatter list fields */
2624 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2625	sg_list.lkey = qp_info->port_priv->mr->lkey;
2626
2627 /* Initialize common receive WR fields */
2628 recv_wr.next = NULL;
2629 recv_wr.sg_list = &sg_list;
2630 recv_wr.num_sge = 1;
2631
2632 do {
2633 /* Allocate and map receive buffer */
2634 if (mad) {
2635 mad_priv = mad;
2636 mad = NULL;
2637 } else {
2638 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2639 if (!mad_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002640 dev_err(&qp_info->port_priv->device->dev,
2641 "No memory for receive buffer\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 ret = -ENOMEM;
2643 break;
2644 }
2645 }
Ralph Campbell15271062006-12-12 14:28:30 -08002646 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2647 &mad_priv->grh,
2648 sizeof *mad_priv -
2649 sizeof mad_priv->header,
2650 DMA_FROM_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02002651 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2652 sg_list.addr))) {
2653 ret = -ENOMEM;
2654 break;
2655 }
Ralph Campbell15271062006-12-12 14:28:30 -08002656 mad_priv->header.mapping = sg_list.addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2658 mad_priv->header.mad_list.mad_queue = recv_queue;
2659
2660 /* Post receive WR */
2661 spin_lock_irqsave(&recv_queue->lock, flags);
2662 post = (++recv_queue->count < recv_queue->max_active);
2663 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2664 spin_unlock_irqrestore(&recv_queue->lock, flags);
2665 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2666 if (ret) {
2667 spin_lock_irqsave(&recv_queue->lock, flags);
2668 list_del(&mad_priv->header.mad_list.list);
2669 recv_queue->count--;
2670 spin_unlock_irqrestore(&recv_queue->lock, flags);
Ralph Campbell15271062006-12-12 14:28:30 -08002671 ib_dma_unmap_single(qp_info->port_priv->device,
2672 mad_priv->header.mapping,
2673 sizeof *mad_priv -
2674 sizeof mad_priv->header,
2675 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 kmem_cache_free(ib_mad_cache, mad_priv);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002677 dev_err(&qp_info->port_priv->device->dev,
2678 "ib_post_recv failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 break;
2680 }
2681 } while (post);
2682
2683 return ret;
2684}
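
/*
 * Posting sketch: each receive WR covers one ib_mad_private minus its
 * header, i.e. the GRH followed by the 256-byte MAD, and the loop
 * keeps posting until recv_queue->count reaches max_active.
 */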
2685
2686/*
2687 * Return all the posted receive MADs
2688 */
2689static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2690{
2691 struct ib_mad_private_header *mad_priv_hdr;
2692 struct ib_mad_private *recv;
2693 struct ib_mad_list_head *mad_list;
2694
Eli Cohenfac70d52010-09-27 17:51:11 -07002695 if (!qp_info->qp)
2696 return;
2697
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 while (!list_empty(&qp_info->recv_queue.list)) {
2699
2700 mad_list = list_entry(qp_info->recv_queue.list.next,
2701 struct ib_mad_list_head, list);
2702 mad_priv_hdr = container_of(mad_list,
2703 struct ib_mad_private_header,
2704 mad_list);
2705 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2706 header);
2707
2708 /* Remove from posted receive MAD list */
2709 list_del(&mad_list->list);
2710
Ralph Campbell15271062006-12-12 14:28:30 -08002711 ib_dma_unmap_single(qp_info->port_priv->device,
2712 recv->header.mapping,
2713 sizeof(struct ib_mad_private) -
2714 sizeof(struct ib_mad_private_header),
2715 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 kmem_cache_free(ib_mad_cache, recv);
2717 }
2718
2719 qp_info->recv_queue.count = 0;
2720}

/*
 * Start the port: move the MAD QPs to RTS and post the initial
 * receive work requests
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

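	/*
	 * QP0 and QP1 are special QPs, so the usual RTR/RTS attributes
	 * (AV, path MTU, destination QPN, and so on) do not apply; each
	 * transition below needs only the minimal attribute mask shown.
	 */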
	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

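/*
 * Asynchronous events on a MAD QP land here.  There is no recovery
 * path for QP0/QP1, so all that can be done is log the failure.
 */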
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

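/* Set up the list and lock that track work requests posted to one queue */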
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

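/* Initialize the send/receive queues and snoop state for one MAD QP */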
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

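/*
 * Create one of the port's special QPs (SMI for QP0, GSI for QP1).
 * Both QPs share the port's single CQ, which ib_mad_port_open() sizes
 * to hold the send and receive queues of every QP it creates.
 */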
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the CQ, PD, MR, and QPs needed to service it
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

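	/*
	 * One CQ is shared by the send and receive queues of both MAD QPs.
	 * Ports whose link layer is not InfiniBand (e.g. RoCE) have no
	 * QP0, so they need room for only one QP's work requests.
	 */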
	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

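/*
 * Bring up MAD services on every port of a newly registered device.
 * A switch presents management traffic only on port 0, while a CA or
 * router numbers its physical ports starting at 1.
 */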
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end = 0;
	} else {
		start = 1;
		end = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
		i--;
	}
}

static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n",
				cur_port);
		if (ib_mad_port_close(device, cur_port))
			dev_err(&device->dev, "Couldn't close port %d\n",
				cur_port);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

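/*
 * Clamp the module parameters to sane bounds before any QPs are
 * created, since arbitrary values can be passed at module load time.
 */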
static int __init ib_mad_init_module(void)
{
	int ret;

	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		pr_err("Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);