/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");


kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;


/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}
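/*
 * Example: convert_mgmt_class(IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 * returns 0, so directed route SMPs index class slot 0, which the
 * reserved class value 0 would otherwise leave unused.
 */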

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non-overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
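/*
 * Registration sketch (illustrative only; my_send_handler,
 * my_recv_handler and my_ctx are hypothetical client code, not part of
 * this file):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_ctx);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */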

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

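/*
 * Add a snoop agent to the QP's snoop table, growing the table by one
 * slot when no free slot exists.  Returns the table index used, or
 * -ENOMEM if the table could not be grown.
 */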
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses, even though the response bit is not set */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

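/*
 * Fabricate a receive work completion for an SMP that is being
 * processed locally (i.e. without going onto the wire), so that it can
 * be handed to the normal receive path.
 */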
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	    !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_smp(smp, device);
	if (!ret)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_buf_length(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
			pad = 0;
	} else
		pad = seg_size;
	return hdr_len + data_len + pad;
}
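/*
 * Worked example: with a 256-byte MAD, hdr_len 36 and data_len 400 give
 * seg_size 220 and pad 40 (400 % 220 = 180, 220 - 180 = 40), so the
 * returned length is 36 + 400 + 40 = 476, i.e. the payload is padded to
 * a whole number of RMPP data segments.
 */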

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int buf_size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	buf_size = get_buf_length(hdr_len, data_len);

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + buf_size;
	mad_send_wr->send_buf.mad = buf;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = buf_size;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 1;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
						   IB_MGMT_RMPP_HDR + data_len);
		rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
				  IB_MGMT_RMPP_FLAG_ACTIVE);
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
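/*
 * Allocation sketch (illustrative only; "agent" and "ah" are
 * hypothetical caller state, not defined in this file):
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL);
 *	if (!IS_ERR(msg)) {
 *		msg->ah = ah;
 *		msg->timeout_ms = 100;
 *		msg->retries = 2;
 *		... fill in msg->mad, then ib_post_send_mad(msg, NULL) ...
 *	}
 */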

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	kfree(send_buf->mad);

	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge->addr = dma_map_single(mad_agent->device->dma_device,
				   mad_send_wr->send_buf.mad, sge->length,
				   DMA_TO_DEVICE);
	pci_unmap_addr_set(mad_send_wr, mapping, sge->addr);

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret)
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, mapping),
				 sge->length, DMA_TO_DEVICE);

	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->retries = send_buf->retries;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
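/*
 * Completion sketch (illustrative only; my_send_handler is hypothetical
 * client code): the registered send handler is expected to release the
 * buffer once the send completes or times out, e.g.:
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *mad_send_wc)
 *	{
 *		... inspect mad_send_wc->status ...
 *		ib_free_send_mad(mad_send_wc->send_buf);
 *	}
 */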

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was a MAD registration request supplied
	 * with the original registration?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method.
		 * For "newer" vendor MADs, also based on OUI.
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

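/*
 * A send is considered a "data" MAD unless the agent is using RMPP and
 * the MAD carries an active RMPP header of some type other than DATA
 * (e.g. ACK, STOP or ABORT).
 */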
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->tid == tid)
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}

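/*
 * Mark a request as done once its response arrives: clear the response
 * timeout and, if the send itself has already completed (refcount of
 * 1), move the request from the wait list to the done list.
 */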
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001547void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001548{
1549 mad_send_wr->timeout = 0;
1550 if (mad_send_wr->refcount == 1) {
1551 list_del(&mad_send_wr->agent_list);
1552 list_add_tail(&mad_send_wr->agent_list,
1553 &mad_send_wr->mad_agent_priv->done_list);
1554 }
1555}
1556
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	__be64 tid;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
	}

	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_priv->port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv()
		 * or via the agent's recv_handler
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

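/*
 * One delayed work item per agent tracks the earliest deadline on the
 * wait list; reschedule it whenever that deadline moves closer.
 */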
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

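/*
 * Complete a send on the hardware send queue.  If sends are parked on
 * the overflow list because the QP was full, promote the next one to
 * the send queue and post it; on a post failure the promoted send is
 * completed in error and the next candidate is tried.
 */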
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, mapping),
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

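/*
 * Handle a completion error.  A send error transitions the QP to the
 * SQE state, which stops the send queue: recovery is to move the QP
 * back to RTS, mark the still-queued sends for reposting, and fail
 * only the offending request.  A receive error means the QP is in the
 * error state, and cleanup is left to the shutdown path.
 */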
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}

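/*
 * Flush every outstanding send for an agent (typically when the agent
 * is being deregistered): sends on the send, wait, and local lists are
 * all reported to the client with IB_WC_WR_FLUSH_ERR.
 */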
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	/* Empty local completion list as well */
	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

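/*
 * Usage sketch (illustrative only, not part of this file): a client
 * that posted a request may rearm or abandon it through the API above,
 * where "agent" and "send_buf" are hypothetical client-side handles:
 *
 *	ib_modify_mad(agent, send_buf, 2000);	- wait up to 2000 ms more
 *	ib_cancel_mad(agent, send_buf);		- abandon; the send completes
 *						  through send_handler with
 *						  IB_WC_WR_FLUSH_ERR
 */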
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv = 0;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc((unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

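/*
 * Resend a timed-out request, consuming one retry; RMPP transactions
 * are restarted through the RMPP layer.  Returns -ETIMEDOUT once the
 * retries are exhausted, so the caller reports the timeout to the
 * client.
 */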
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

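/*
 * CQ completion callback.  This may be invoked in interrupt context,
 * so it only queues the port's work item; the actual completion
 * processing runs in ib_mad_completion_handler() on the port work
 * queue, where it may sleep and allocate with GFP_KERNEL.
 */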
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
					   sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port: bring each MAD QP through the INIT -> RTR -> RTS
 * state transitions and post the initial receive WRs
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

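/*
 * Client "add" callback: open MAD services on every port.  A switch
 * is managed entirely through port 0, so only that port is opened;
 * a CA uses ports 1 through phys_port_cnt.
 */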
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == IB_NODE_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}

static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);