/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
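
/*
 * Illustrative note (not part of the original source): a MAD counts as a
 * response when the response bit of its method is set -- e.g.
 * IB_MGMT_METHOD_GET_RESP (0x81) is IB_MGMT_METHOD_GET (0x01) with the
 * IB_MGMT_METHOD_RESP bit (0x80) set -- when it is a TrapRepress, or, for
 * the BM class, when the response bit of attr_mod is set.  A hypothetical
 * dispatcher might use it like:
 *
 *	if (ib_response_mad(mad))
 *		match_against_outstanding_sends(mad);	(hypothetical helper)
 */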

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non-overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
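
/*
 * Illustrative sketch (not part of the original source): a client that
 * wants to receive, say, Performance Management Get MADs would fill in an
 * ib_mad_reg_req and register on the GSI QP.  The handler and context
 * names below are hypothetical.
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */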

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
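
/*
 * Illustrative note (not part of the original source): unregistration
 * cancels outstanding sends and waits for all references to the agent to
 * drop, so a hypothetical module exit path needs nothing more than:
 *
 *	static void my_module_cleanup(void)
 *	{
 *		ib_unregister_mad_agent(agent);		(agent saved at init)
 *	}
 */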

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
653
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200654static void build_smp_wc(struct ib_qp *qp,
655 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656 struct ib_wc *wc)
657{
658 memset(wc, 0, sizeof *wc);
659 wc->wr_id = wr_id;
660 wc->status = IB_WC_SUCCESS;
661 wc->opcode = IB_WC_RECV;
662 wc->pkey_index = pkey_index;
663 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
664 wc->src_qp = IB_QP0;
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200665 wc->qp = qp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 wc->slid = slid;
667 wc->sl = 0;
668 wc->dlid_path_bits = 0;
669 wc->port_num = port_num;
670}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
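
/*
 * Worked example (illustrative, not part of the original source): a MAD
 * is 256 bytes, so with an SA header (hdr_len = IB_MGMT_SA_HDR = 56) each
 * RMPP segment carries seg_size = 256 - 56 = 200 payload bytes.  For
 * data_len = 500, the last segment holds only 100 bytes and pad = 100;
 * for data_len = 400, the data fills its segments exactly and pad = 0.
 */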

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
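
/*
 * Illustrative sketch (not part of the original source): allocating a
 * single-packet (non-RMPP) Get, filling in its header, and releasing it
 * again.  The agent, remote_qpn and pkey_index are assumed to exist.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	((struct ib_mad_hdr *) msg->mad)->method = IB_MGMT_METHOD_GET;
 *	...
 *	ib_free_send_mad(msg);		(if the MAD is never posted)
 */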

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
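
/*
 * Illustrative note (not part of the original source): RMPP segments are
 * numbered from 1 to send_buf->seg_count, and cur_seg caches the last
 * segment looked up, so walking them in order is cheap.  A hypothetical
 * caller copying from a source buffer already padded to a whole number of
 * segments might do:
 *
 *	for (i = 1; i <= msg->seg_count; i++)
 *		memcpy(ib_get_rmpp_segment(msg, i),
 *		       src + (i - 1) * msg->seg_size, msg->seg_size);
 */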

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
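
/*
 * Illustrative sketch (not part of the original source): posting a MAD
 * built with ib_create_send_mad() and recovering the buffer that failed
 * when a chain is posted.  msg->ah, msg->timeout_ms and msg->retries are
 * assumed to have been set by the caller beforehand.
 *
 *	struct ib_mad_send_buf *bad;
 *	int ret;
 *
 *	ret = ib_post_send_mad(msg, &bad);
 *	if (ret)
 *		ib_free_send_mad(bad);	(bad and any later buffers in the
 *					 chain were not posted)
 */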

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
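
/*
 * Illustrative note (not part of the original source): a client's
 * recv_handler typically ends with a call to ib_free_recv_mad() once it
 * has finished with the data, e.g.:
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *wc)
 *	{
 *		process_mad_somehow(wc->recv_buf.mad);
 *		ib_free_recv_mad(wc);
 *	}
 *
 * (my_recv_handler and process_mad_somehow are hypothetical names.)
 */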

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
1359
1360static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1361 struct ib_mad_agent_private *agent_priv)
1362{
1363 struct ib_mad_port_private *port_priv;
1364 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1365 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1366 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1367 struct ib_mad_mgmt_method_table **method;
1368 int i, ret = -ENOMEM;
1369 u8 vclass;
1370
1371 /* "New" vendor (with OUI) class */
1372 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1373 port_priv = agent_priv->qp_info->port_priv;
1374 vendor_table = &port_priv->version[
1375 mad_reg_req->mgmt_class_version].vendor;
1376 if (!*vendor_table) {
1377 /* Allocate mgmt vendor class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001378 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 if (!vendor) {
1380 printk(KERN_ERR PFX "No memory for "
1381 "ib_mad_mgmt_vendor_class_table\n");
1382 goto error1;
1383 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001384
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 *vendor_table = vendor;
1386 }
1387 if (!(*vendor_table)->vendor_class[vclass]) {
1388 /* Allocate table for this management vendor class */
Roland Dreierde6eb662005-11-02 07:23:14 -08001389 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 if (!vendor_class) {
1391 printk(KERN_ERR PFX "No memory for "
1392 "ib_mad_mgmt_vendor_class\n");
1393 goto error2;
1394 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001395
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 (*vendor_table)->vendor_class[vclass] = vendor_class;
1397 }
1398 for (i = 0; i < MAX_MGMT_OUI; i++) {
1399 /* Is there matching OUI for this vendor class ? */
1400 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1401 mad_reg_req->oui, 3)) {
1402 method = &(*vendor_table)->vendor_class[
1403 vclass]->method_table[i];
1404 BUG_ON(!*method);
1405 goto check_in_use;
1406 }
1407 }
1408 for (i = 0; i < MAX_MGMT_OUI; i++) {
1409 /* OUI slot available ? */
1410 if (!is_vendor_oui((*vendor_table)->vendor_class[
1411 vclass]->oui[i])) {
1412 method = &(*vendor_table)->vendor_class[
1413 vclass]->method_table[i];
1414 BUG_ON(*method);
1415 /* Allocate method table for this OUI */
1416 if ((ret = allocate_method_table(method)))
1417 goto error3;
1418 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1419 mad_reg_req->oui, 3);
1420 goto check_in_use;
1421 }
1422 }
1423 printk(KERN_ERR PFX "All OUI slots in use\n");
1424 goto error3;
1425
1426check_in_use:
1427 /* Now, make sure methods are not already in use */
1428 if (method_in_use(method, mad_reg_req))
1429 goto error4;
1430
1431 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001432 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001434
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 return 0;
1436
1437error4:
1438 /* Remove any methods for this mad agent */
1439 remove_methods_mad_agent(*method, agent_priv);
1440 /* Now, check to see if there are any methods in use */
1441 if (!check_method_table(*method)) {
1442 /* If not, release management method table */
1443 kfree(*method);
1444 *method = NULL;
1445 }
1446 ret = -EINVAL;
1447error3:
1448 if (vendor_class) {
1449 (*vendor_table)->vendor_class[vclass] = NULL;
1450 kfree(vendor_class);
1451 }
1452error2:
1453 if (vendor) {
1454 *vendor_table = NULL;
1455 kfree(vendor);
1456 }
1457error1:
1458 return ret;
1459}
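
/*
 * Note on the vendor-class registration above: the OUI array is scanned
 * twice on purpose.  The first pass looks for an existing entry with the
 * same OUI so that agents registering the same vendor class share one
 * method table; only if no match exists does the second pass claim a
 * free slot and allocate a fresh table.  The error ladder then unwinds
 * in strict reverse order of allocation (method table, vendor class,
 * vendor table), freeing each table only when no other agent still
 * uses it.
 */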

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
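
/*
 * MAD demultiplexing in find_mad_agent() works in two directions.
 * A response is routed by the upper 32 bits of its transaction ID,
 * which are expected to carry the hi_tid assigned to the requesting
 * agent when it registered, e.g.:
 *
 *	tid = 0x0000002500001234
 *	      \______/ \______/
 *	       hi_tid   value chosen by the client
 *
 * (the 0x25 agent number here is only an illustration).  A request
 * instead walks the per-version class/method tables built by the
 * add_*_reg_req() routines, with vendor classes adding an OUI lookup.
 */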

static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
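
/*
 * The validity check above enforces the split between the two special
 * QPs: subnet management classes (LID-routed or directed-route SMPs)
 * are only legal on QP0, while every other management class (SA, CM,
 * performance management, vendor classes, ...) must arrive on a GSI
 * QP.  Anything else is dropped by the caller.
 */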

static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(rwc->recv_buf.mad);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
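
/*
 * rcv_has_same_gid() pairs a received MAD with an outstanding send by
 * address rather than by payload.  With LMC > 0 a port answers to
 * 2^LMC LIDs, so for LID-routed traffic only the path bits that the
 * LMC masks in are compared; for GRH traffic the source/destination
 * GIDs are compared instead.  Directed-route SMPs are exempted via
 * is_direct() because they may carry the permissive LID, which would
 * defeat any address comparison.
 */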

struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
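
/*
 * ib_find_send_mad() searches two lists because a request moves from
 * the agent's send_list to its wait_list only when the send completion
 * fires, while the response can overtake that completion on the wire.
 * The wait_list is checked first (the common case); the send_list scan
 * additionally requires a nonzero timeout so that unsolicited sends,
 * which expect no response, are never matched.
 */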

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
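
/*
 * For a matched response, the completion order above is deliberate:
 * the client's recv_handler runs first, then a synthesized successful
 * send completion retires the original request.  Stashing the
 * send_buf pointer in wc->wr_id lets the receive handler correlate
 * the response with its request before the send handler has run.
 */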

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    sizeof(struct ib_mad_private) -
			      sizeof(struct ib_mad_private_header),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response) {
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");
		goto out;
	}

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		enum smi_forward_action retsmi;

		if (smi_handle_dr_smp_recv(&recv->mad.smp,
					   port_priv->device->node_type,
					   port_num,
					   port_priv->device->phys_port_cnt) ==
					   IB_SMI_DISCARD)
			goto out;

		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
		if (retsmi == IB_SMI_LOCAL)
			goto local;

		if (retsmi == IB_SMI_SEND) { /* don't forward */
			if (smi_handle_dr_smp_send(&recv->mad.smp,
						   port_priv->device->node_type,
						   port_num) == IB_SMI_DISCARD)
				goto out;

			if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
				goto out;
		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
			/* forward case for switches */
			memcpy(response, recv, sizeof(*response));
			response->header.recv_wc.wc = &response->header.wc;
			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
			response->header.recv_wc.recv_buf.grh = &response->grh;

			agent_send_response(&response->mad.mad,
					    &response->grh, wc,
					    port_priv->device,
					    smi_get_fwd_port(&recv->mad.smp),
					    qp_info->qp->qp_num);

			goto out;
		}
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
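
/*
 * The receive path above dispatches an incoming MAD in a fixed order:
 * directed-route SMP handling (consume, answer locally, or forward on
 * a switch), then the driver's process_mad() "right of first refusal",
 * and only then the registered agents.  Whichever buffer is left over,
 * the preallocated response or an unconsumed receive, is recycled back
 * onto the receive queue so the QP never runs dry.
 */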

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		__cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			__cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		__cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
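
/*
 * Request timeouts are tracked with a single delayed work item per
 * agent rather than a timer per send.  wait_for_response() keeps the
 * wait_list sorted by absolute expiry (scanning backwards, since new
 * timeouts usually land at the tail), and the work item is
 * rescheduled only when the head of the list, i.e. the earliest
 * expiry, changes.
 */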

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
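
/*
 * A send that expects a response carries an extra reference: one for
 * the send completion and one for the response or timeout, which is
 * why the error path above subtracts (timeout > 0).  The client is
 * only notified once the count reaches zero; a request whose send has
 * completed but whose response is still outstanding parks on the
 * wait_list via wait_for_response() instead.
 */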

static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
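
/*
 * Sends beyond the QP's max_active limit sit on the qp_info
 * overflow_list rather than being refused.  Each send completion
 * therefore frees one hardware slot and, while holding the queue
 * lock, promotes the oldest overflow entry onto the real send queue
 * before posting it; a failed repost is fed back through the retry
 * label as a synthetic completion.
 */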

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
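
/*
 * When a send fails, a UD QP stops in the SQE state and flushes every
 * later work request.  Recovery is two-phase: the offending send is
 * completed with its error, the QP is moved SQE -> RTS, and the
 * already-posted sends are tagged via mark_sends_for_retry() so their
 * flush completions repost them instead of failing the client's MADs.
 */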

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
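
/*
 * Cancellation is expressed as "modify the timeout to zero": the
 * request is marked IB_WC_WR_FLUSH_ERR and retires through the normal
 * send-completion path.  A hypothetical caller sketch (illustrative
 * only; a real caller must still handle the completion that arrives
 * with IB_WC_WR_FLUSH_ERR status):
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, ...);
 *	if (!IS_ERR(msg) && !ib_post_send_mad(msg, NULL)) {
 *		...
 *		ib_cancel_mad(agent, msg);  /\* give up on the reply *\/
 *	}
 */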

static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
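
/*
 * retry_send() consumes one retry per expiry: retries_left counts
 * down toward -ETIMEDOUT while send_buf.retries counts up so the
 * client can see how many attempts were made.  A successful resend
 * takes a fresh reference and puts the request back on the
 * send_list, where it behaves exactly like a first transmission.
 */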

static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    sizeof *mad_priv -
					      sizeof mad_priv->header,
					    DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
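
/*
 * The receive ring refills itself from its own completions: each
 * buffer is DMA-mapped starting at its GRH so the GRH and the MAD
 * payload land contiguously, and posting continues until
 * recv_queue->count reaches max_active.  Passing in a "mad" recycles
 * an already-allocated private buffer before any new slab
 * allocations are made.
 */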

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    sizeof(struct ib_mad_private) -
				      sizeof(struct ib_mad_private_header),
				    DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
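
/*
 * Bringing up a MAD QP walks the standard UD state machine,
 * RESET -> INIT -> RTR -> RTS, with only the attributes a UD QP
 * needs: a pkey index and Q_Key for INIT (0 for QP0, IB_QP1_QKEY for
 * QP1) and a starting send PSN for RTS.  Receive buffers are posted
 * only after every QP is in RTS and CQ notification is armed.
 */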

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}
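
/*
 * Both special QPs share one CQ and are sized from the mad_sendq_size
 * and mad_recvq_size module parameters, so queue depth can be tuned
 * at module load time without rebuilding.  max_active mirrors the WR
 * caps and is what gates the overflow_list logic in the send path.
 */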
2775
2776static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2777{
Eli Cohenfac70d52010-09-27 17:51:11 -07002778 if (!qp_info->qp)
2779 return;
2780
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 ib_destroy_qp(qp_info->qp);
Jesper Juhl6044ec82005-11-07 01:01:32 -08002782 kfree(qp_info->snoop_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783}
2784
2785/*
2786 * Open the port
2787 * Create the QP, PD, MR, and CQ if needed
2788 */
2789static int ib_mad_port_open(struct ib_device *device,
2790 int port_num)
2791{
2792 int ret, cq_size;
2793 struct ib_mad_port_private *port_priv;
2794 unsigned long flags;
2795 char name[sizeof "ib_mad123"];
Eli Cohenfac70d52010-09-27 17:51:11 -07002796 int has_smi;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 /* Create new device info */
Roland Dreierde6eb662005-11-02 07:23:14 -08002799 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 if (!port_priv) {
2801 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2802 return -ENOMEM;
2803 }
Roland Dreierde6eb662005-11-02 07:23:14 -08002804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 port_priv->device = device;
2806 port_priv->port_num = port_num;
2807 spin_lock_init(&port_priv->reg_lock);
2808 INIT_LIST_HEAD(&port_priv->agent_list);
2809 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2810 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2811
Eli Cohenfac70d52010-09-27 17:51:11 -07002812 cq_size = mad_sendq_size + mad_recvq_size;
2813 has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
2814 if (has_smi)
2815 cq_size *= 2;
2816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 port_priv->cq = ib_create_cq(port_priv->device,
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07002818 ib_mad_thread_completion_handler,
Michael S. Tsirkinf4fd0b22007-05-03 13:48:47 +03002819 NULL, port_priv, cq_size, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 if (IS_ERR(port_priv->cq)) {
2821 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2822 ret = PTR_ERR(port_priv->cq);
2823 goto error3;
2824 }
2825
2826 port_priv->pd = ib_alloc_pd(device);
2827 if (IS_ERR(port_priv->pd)) {
2828 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2829 ret = PTR_ERR(port_priv->pd);
2830 goto error4;
2831 }
2832
2833 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2834 if (IS_ERR(port_priv->mr)) {
2835 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2836 ret = PTR_ERR(port_priv->mr);
2837 goto error5;
2838 }
2839
	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

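	/*
	 * A single-threaded workqueue serializes completion processing
	 * for this port.
	 */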
	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	return 0;

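/* Error labels unwind in reverse order of allocation */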
error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
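	/* Unlink the port first so no new lookups can find it during teardown */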
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

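	/* Switches expose MADs on port 0 only; CAs use ports 1..phys_port_cnt */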
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end = 0;
	} else {
		start = 1;
		end = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

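/*
 * At error_agent the MAD port for the failing index is still open;
 * close it, then walk back over all previously opened ports.
 */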
error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}

static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

static int __init ib_mad_init_module(void)
{
	int ret;

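	/* Clamp the module parameters to the supported queue-size range */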
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);