blob: aa4f8d4a4eba33873265e82907340a59c63a816d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Hal Rosenstockde493d42007-04-02 11:24:07 -04002 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
Hal Rosenstockfa619a72005-07-27 11:45:37 -07003 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07005 * Copyright (c) 2009 HNR Consulting. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070035 */
Ira Weiny7ef5d4b2014-08-08 19:00:53 -040036
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/dma-mapping.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090040#include <linux/slab.h>
Paul Gortmakere4dd23d2011-05-27 15:35:46 -040041#include <linux/module.h>
Jack Morgenstein9874e742006-06-17 20:37:34 -070042#include <rdma/ib_cache.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include "mad_priv.h"
Hal Rosenstockfa619a72005-07-27 11:45:37 -070045#include "mad_rmpp.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include "smi.h"
47#include "agent.h"
48
49MODULE_LICENSE("Dual BSD/GPL");
50MODULE_DESCRIPTION("kernel IB MAD API");
51MODULE_AUTHOR("Hal Rosenstock");
52MODULE_AUTHOR("Sean Hefty");
53
Roland Dreier16933952010-05-23 21:39:31 -070054static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
Hal Rosenstockb76aabc2009-09-07 08:28:48 -070056
57module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
61
Linus Torvalds1da177e2005-04-16 15:20:36 -070062static struct list_head ib_mad_port_list;
63static u32 ib_mad_client_id = 0;
64
65/* Port list lock */
Roland Dreier6276e082009-09-05 20:24:23 -070066static DEFINE_SPINLOCK(ib_mad_port_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
68/* Forward declarations */
69static int method_in_use(struct ib_mad_mgmt_method_table **method,
70 struct ib_mad_reg_req *mad_reg_req);
71static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
72static struct ib_mad_agent_private *find_mad_agent(
73 struct ib_mad_port_private *port_priv,
Ira Weinyd94bd262015-06-06 14:38:22 -040074 const struct ib_mad_hdr *mad);
Linus Torvalds1da177e2005-04-16 15:20:36 -070075static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
76 struct ib_mad_private *mad);
77static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
David Howellsc4028952006-11-22 14:57:56 +000078static void timeout_sends(struct work_struct *work);
79static void local_completions(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
81 struct ib_mad_agent_private *agent_priv,
82 u8 mgmt_class);
83static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
84 struct ib_mad_agent_private *agent_priv);
85
86/*
87 * Returns a ib_mad_port_private structure or NULL for a device/port
88 * Assumes ib_mad_port_list_lock is being held
89 */
90static inline struct ib_mad_port_private *
91__ib_get_mad_port(struct ib_device *device, int port_num)
92{
93 struct ib_mad_port_private *entry;
94
95 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
96 if (entry->device == device && entry->port_num == port_num)
97 return entry;
98 }
99 return NULL;
100}
101
102/*
103 * Wrapper function to return a ib_mad_port_private structure or NULL
104 * for a device/port
105 */
106static inline struct ib_mad_port_private *
107ib_get_mad_port(struct ib_device *device, int port_num)
108{
109 struct ib_mad_port_private *entry;
110 unsigned long flags;
111
112 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
113 entry = __ib_get_mad_port(device, port_num);
114 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
115
116 return entry;
117}
118
119static inline u8 convert_mgmt_class(u8 mgmt_class)
120{
121 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
122 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
123 0 : mgmt_class;
124}
125
126static int get_spl_qp_index(enum ib_qp_type qp_type)
127{
128 switch (qp_type)
129 {
130 case IB_QPT_SMI:
131 return 0;
132 case IB_QPT_GSI:
133 return 1;
134 default:
135 return -1;
136 }
137}
138
139static int vendor_class_index(u8 mgmt_class)
140{
141 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
142}
143
144static int is_vendor_class(u8 mgmt_class)
145{
146 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
147 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
148 return 0;
149 return 1;
150}
151
152static int is_vendor_oui(char *oui)
153{
154 if (oui[0] || oui[1] || oui[2])
155 return 1;
156 return 0;
157}
158
159static int is_vendor_method_in_use(
160 struct ib_mad_mgmt_vendor_class *vendor_class,
161 struct ib_mad_reg_req *mad_reg_req)
162{
163 struct ib_mad_mgmt_method_table *method;
164 int i;
165
166 for (i = 0; i < MAX_MGMT_OUI; i++) {
167 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
168 method = vendor_class->method_table[i];
169 if (method) {
170 if (method_in_use(&method, mad_reg_req))
171 return 1;
172 else
173 break;
174 }
175 }
176 }
177 return 0;
178}
179
Ira Weiny96909302015-05-08 14:27:22 -0400180int ib_response_mad(const struct ib_mad_hdr *hdr)
Sean Hefty2527e682006-07-20 11:25:50 +0300181{
Ira Weiny96909302015-05-08 14:27:22 -0400182 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
183 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
184 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
185 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
Sean Hefty2527e682006-07-20 11:25:50 +0300186}
187EXPORT_SYMBOL(ib_response_mad);
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189/*
190 * ib_register_mad_agent - Register to send/receive MADs
191 */
192struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
193 u8 port_num,
194 enum ib_qp_type qp_type,
195 struct ib_mad_reg_req *mad_reg_req,
196 u8 rmpp_version,
197 ib_mad_send_handler send_handler,
198 ib_mad_recv_handler recv_handler,
Ira Weiny0f29b462014-08-08 19:00:55 -0400199 void *context,
200 u32 registration_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201{
202 struct ib_mad_port_private *port_priv;
203 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
204 struct ib_mad_agent_private *mad_agent_priv;
205 struct ib_mad_reg_req *reg_req = NULL;
206 struct ib_mad_mgmt_class_table *class;
207 struct ib_mad_mgmt_vendor_class_table *vendor;
208 struct ib_mad_mgmt_vendor_class *vendor_class;
209 struct ib_mad_mgmt_method_table *method;
210 int ret2, qpn;
211 unsigned long flags;
212 u8 mgmt_class, vclass;
213
214 /* Validate parameters */
215 qpn = get_spl_qp_index(qp_type);
Ira Weiny9ad13a42014-08-08 19:00:54 -0400216 if (qpn == -1) {
217 dev_notice(&device->dev,
218 "ib_register_mad_agent: invalid QP Type %d\n",
219 qp_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400221 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222
Ira Weiny9ad13a42014-08-08 19:00:54 -0400223 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
224 dev_notice(&device->dev,
225 "ib_register_mad_agent: invalid RMPP Version %u\n",
226 rmpp_version);
Hal Rosenstockfa619a72005-07-27 11:45:37 -0700227 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400228 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229
230 /* Validate MAD registration request if supplied */
231 if (mad_reg_req) {
Ira Weiny9ad13a42014-08-08 19:00:54 -0400232 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
233 dev_notice(&device->dev,
234 "ib_register_mad_agent: invalid Class Version %u\n",
235 mad_reg_req->mgmt_class_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400237 }
238 if (!recv_handler) {
239 dev_notice(&device->dev,
240 "ib_register_mad_agent: no recv_handler\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
244 /*
245 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
246 * one in this range currently allowed
247 */
248 if (mad_reg_req->mgmt_class !=
Ira Weiny9ad13a42014-08-08 19:00:54 -0400249 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
250 dev_notice(&device->dev,
251 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
252 mad_reg_req->mgmt_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400254 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255 } else if (mad_reg_req->mgmt_class == 0) {
256 /*
257 * Class 0 is reserved in IBA and is used for
258 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
259 */
Ira Weiny9ad13a42014-08-08 19:00:54 -0400260 dev_notice(&device->dev,
261 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262 goto error1;
263 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
264 /*
265 * If class is in "new" vendor range,
266 * ensure supplied OUI is not zero
267 */
Ira Weiny9ad13a42014-08-08 19:00:54 -0400268 if (!is_vendor_oui(mad_reg_req->oui)) {
269 dev_notice(&device->dev,
270 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
271 mad_reg_req->mgmt_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 }
Hal Rosenstock618a3c02006-03-28 16:40:04 -0800275 /* Make sure class supplied is consistent with RMPP */
Hal Rosenstock64cb9c62006-04-12 21:29:10 -0400276 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
Ira Weiny9ad13a42014-08-08 19:00:54 -0400277 if (rmpp_version) {
278 dev_notice(&device->dev,
279 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
280 mad_reg_req->mgmt_class);
Hal Rosenstock618a3c02006-03-28 16:40:04 -0800281 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400282 }
Hal Rosenstock618a3c02006-03-28 16:40:04 -0800283 }
Ira Weiny1471cb62014-08-08 19:00:56 -0400284
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285 /* Make sure class supplied is consistent with QP type */
286 if (qp_type == IB_QPT_SMI) {
287 if ((mad_reg_req->mgmt_class !=
288 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
289 (mad_reg_req->mgmt_class !=
Ira Weiny9ad13a42014-08-08 19:00:54 -0400290 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
291 dev_notice(&device->dev,
292 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
293 mad_reg_req->mgmt_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400295 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 } else {
297 if ((mad_reg_req->mgmt_class ==
298 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
299 (mad_reg_req->mgmt_class ==
Ira Weiny9ad13a42014-08-08 19:00:54 -0400300 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
301 dev_notice(&device->dev,
302 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
303 mad_reg_req->mgmt_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304 goto error1;
Ira Weiny9ad13a42014-08-08 19:00:54 -0400305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306 }
307 } else {
308 /* No registration request supplied */
309 if (!send_handler)
310 goto error1;
Ira Weiny1471cb62014-08-08 19:00:56 -0400311 if (registration_flags & IB_MAD_USER_RMPP)
312 goto error1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313 }
314
315 /* Validate device and port */
316 port_priv = ib_get_mad_port(device, port_num);
317 if (!port_priv) {
Ira Weiny9ad13a42014-08-08 19:00:54 -0400318 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319 ret = ERR_PTR(-ENODEV);
320 goto error1;
321 }
322
Ira Weinyc8367c42011-05-19 18:19:28 -0700323 /* Verify the QP requested is supported. For example, Ethernet devices
324 * will not have QP0 */
325 if (!port_priv->qp_info[qpn].qp) {
Ira Weiny9ad13a42014-08-08 19:00:54 -0400326 dev_notice(&device->dev,
327 "ib_register_mad_agent: QP %d not supported\n", qpn);
Ira Weinyc8367c42011-05-19 18:19:28 -0700328 ret = ERR_PTR(-EPROTONOSUPPORT);
329 goto error1;
330 }
331
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332 /* Allocate structures */
Roland Dreierde6eb662005-11-02 07:23:14 -0800333 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 if (!mad_agent_priv) {
335 ret = ERR_PTR(-ENOMEM);
336 goto error1;
337 }
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700338
339 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
340 IB_ACCESS_LOCAL_WRITE);
341 if (IS_ERR(mad_agent_priv->agent.mr)) {
342 ret = ERR_PTR(-ENOMEM);
343 goto error2;
344 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345
346 if (mad_reg_req) {
Julia Lawall9893e742010-05-15 23:22:38 +0200347 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (!reg_req) {
349 ret = ERR_PTR(-ENOMEM);
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700350 goto error3;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 }
353
354 /* Now, fill in the various structures */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
356 mad_agent_priv->reg_req = reg_req;
Hal Rosenstockfa619a72005-07-27 11:45:37 -0700357 mad_agent_priv->agent.rmpp_version = rmpp_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358 mad_agent_priv->agent.device = device;
359 mad_agent_priv->agent.recv_handler = recv_handler;
360 mad_agent_priv->agent.send_handler = send_handler;
361 mad_agent_priv->agent.context = context;
362 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
363 mad_agent_priv->agent.port_num = port_num;
Ira Weiny0f29b462014-08-08 19:00:55 -0400364 mad_agent_priv->agent.flags = registration_flags;
Ralph Campbelld9620a42009-02-27 14:44:32 -0800365 spin_lock_init(&mad_agent_priv->lock);
366 INIT_LIST_HEAD(&mad_agent_priv->send_list);
367 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
368 INIT_LIST_HEAD(&mad_agent_priv->done_list);
369 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
370 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
371 INIT_LIST_HEAD(&mad_agent_priv->local_list);
372 INIT_WORK(&mad_agent_priv->local_work, local_completions);
373 atomic_set(&mad_agent_priv->refcount, 1);
374 init_completion(&mad_agent_priv->comp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375
376 spin_lock_irqsave(&port_priv->reg_lock, flags);
377 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
378
379 /*
380 * Make sure MAD registration (if supplied)
381 * is non overlapping with any existing ones
382 */
383 if (mad_reg_req) {
384 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
385 if (!is_vendor_class(mgmt_class)) {
386 class = port_priv->version[mad_reg_req->
387 mgmt_class_version].class;
388 if (class) {
389 method = class->method_table[mgmt_class];
390 if (method) {
391 if (method_in_use(&method,
392 mad_reg_req))
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700393 goto error4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 }
395 }
396 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
397 mgmt_class);
398 } else {
399 /* "New" vendor class range */
400 vendor = port_priv->version[mad_reg_req->
401 mgmt_class_version].vendor;
402 if (vendor) {
403 vclass = vendor_class_index(mgmt_class);
404 vendor_class = vendor->vendor_class[vclass];
405 if (vendor_class) {
406 if (is_vendor_method_in_use(
407 vendor_class,
408 mad_reg_req))
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700409 goto error4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 }
411 }
412 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
413 }
414 if (ret2) {
415 ret = ERR_PTR(ret2);
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700416 goto error4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 }
418 }
419
420 /* Add mad agent into port's agent list */
421 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
422 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
423
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424 return &mad_agent_priv->agent;
425
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700426error4:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
428 kfree(reg_req);
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700429error3:
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700430 ib_dereg_mr(mad_agent_priv->agent.mr);
Adrian Bunk2012a112005-11-27 00:37:36 +0100431error2:
432 kfree(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433error1:
434 return ret;
435}
436EXPORT_SYMBOL(ib_register_mad_agent);
437
438static inline int is_snooping_sends(int mad_snoop_flags)
439{
440 return (mad_snoop_flags &
441 (/*IB_MAD_SNOOP_POSTED_SENDS |
442 IB_MAD_SNOOP_RMPP_SENDS |*/
443 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
444 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
445}
446
447static inline int is_snooping_recvs(int mad_snoop_flags)
448{
449 return (mad_snoop_flags &
450 (IB_MAD_SNOOP_RECVS /*|
451 IB_MAD_SNOOP_RMPP_RECVS*/));
452}
453
454static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
455 struct ib_mad_snoop_private *mad_snoop_priv)
456{
457 struct ib_mad_snoop_private **new_snoop_table;
458 unsigned long flags;
459 int i;
460
461 spin_lock_irqsave(&qp_info->snoop_lock, flags);
462 /* Check for empty slot in array. */
463 for (i = 0; i < qp_info->snoop_table_size; i++)
464 if (!qp_info->snoop_table[i])
465 break;
466
467 if (i == qp_info->snoop_table_size) {
468 /* Grow table. */
Roland Dreier528051742008-10-14 14:05:36 -0700469 new_snoop_table = krealloc(qp_info->snoop_table,
470 sizeof mad_snoop_priv *
471 (qp_info->snoop_table_size + 1),
472 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 if (!new_snoop_table) {
474 i = -ENOMEM;
475 goto out;
476 }
Roland Dreier528051742008-10-14 14:05:36 -0700477
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478 qp_info->snoop_table = new_snoop_table;
479 qp_info->snoop_table_size++;
480 }
481 qp_info->snoop_table[i] = mad_snoop_priv;
482 atomic_inc(&qp_info->snoop_count);
483out:
484 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
485 return i;
486}
487
488struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
489 u8 port_num,
490 enum ib_qp_type qp_type,
491 int mad_snoop_flags,
492 ib_mad_snoop_handler snoop_handler,
493 ib_mad_recv_handler recv_handler,
494 void *context)
495{
496 struct ib_mad_port_private *port_priv;
497 struct ib_mad_agent *ret;
498 struct ib_mad_snoop_private *mad_snoop_priv;
499 int qpn;
500
501 /* Validate parameters */
502 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
503 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
504 ret = ERR_PTR(-EINVAL);
505 goto error1;
506 }
507 qpn = get_spl_qp_index(qp_type);
508 if (qpn == -1) {
509 ret = ERR_PTR(-EINVAL);
510 goto error1;
511 }
512 port_priv = ib_get_mad_port(device, port_num);
513 if (!port_priv) {
514 ret = ERR_PTR(-ENODEV);
515 goto error1;
516 }
517 /* Allocate structures */
Roland Dreierde6eb662005-11-02 07:23:14 -0800518 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 if (!mad_snoop_priv) {
520 ret = ERR_PTR(-ENOMEM);
521 goto error1;
522 }
523
524 /* Now, fill in the various structures */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
526 mad_snoop_priv->agent.device = device;
527 mad_snoop_priv->agent.recv_handler = recv_handler;
528 mad_snoop_priv->agent.snoop_handler = snoop_handler;
529 mad_snoop_priv->agent.context = context;
530 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
531 mad_snoop_priv->agent.port_num = port_num;
532 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
Sean Hefty1b52fa982006-05-12 14:57:52 -0700533 init_completion(&mad_snoop_priv->comp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 mad_snoop_priv->snoop_index = register_snoop_agent(
535 &port_priv->qp_info[qpn],
536 mad_snoop_priv);
537 if (mad_snoop_priv->snoop_index < 0) {
538 ret = ERR_PTR(mad_snoop_priv->snoop_index);
539 goto error2;
540 }
541
542 atomic_set(&mad_snoop_priv->refcount, 1);
543 return &mad_snoop_priv->agent;
544
545error2:
546 kfree(mad_snoop_priv);
547error1:
548 return ret;
549}
550EXPORT_SYMBOL(ib_register_mad_snoop);
551
Sean Hefty1b52fa982006-05-12 14:57:52 -0700552static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
553{
554 if (atomic_dec_and_test(&mad_agent_priv->refcount))
555 complete(&mad_agent_priv->comp);
556}
557
558static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
559{
560 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
561 complete(&mad_snoop_priv->comp);
562}
563
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
565{
566 struct ib_mad_port_private *port_priv;
567 unsigned long flags;
568
569 /* Note that we could still be handling received MADs */
570
571 /*
572 * Canceling all sends results in dropping received response
573 * MADs, preventing us from queuing additional work
574 */
575 cancel_mads(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 port_priv = mad_agent_priv->qp_info->port_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577 cancel_delayed_work(&mad_agent_priv->timed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578
579 spin_lock_irqsave(&port_priv->reg_lock, flags);
580 remove_mad_reg_req(mad_agent_priv);
581 list_del(&mad_agent_priv->agent_list);
582 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
583
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700584 flush_workqueue(port_priv->wq);
Hal Rosenstockfa619a72005-07-27 11:45:37 -0700585 ib_cancel_rmpp_recvs(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586
Sean Hefty1b52fa982006-05-12 14:57:52 -0700587 deref_mad_agent(mad_agent_priv);
588 wait_for_completion(&mad_agent_priv->comp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
Jesper Juhl6044ec82005-11-07 01:01:32 -0800590 kfree(mad_agent_priv->reg_req);
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700591 ib_dereg_mr(mad_agent_priv->agent.mr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 kfree(mad_agent_priv);
593}
594
595static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
596{
597 struct ib_mad_qp_info *qp_info;
598 unsigned long flags;
599
600 qp_info = mad_snoop_priv->qp_info;
601 spin_lock_irqsave(&qp_info->snoop_lock, flags);
602 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
603 atomic_dec(&qp_info->snoop_count);
604 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605
Sean Hefty1b52fa982006-05-12 14:57:52 -0700606 deref_snoop_agent(mad_snoop_priv);
607 wait_for_completion(&mad_snoop_priv->comp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608
609 kfree(mad_snoop_priv);
610}
611
612/*
613 * ib_unregister_mad_agent - Unregisters a client from using MAD services
614 */
615int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
616{
617 struct ib_mad_agent_private *mad_agent_priv;
618 struct ib_mad_snoop_private *mad_snoop_priv;
619
620 /* If the TID is zero, the agent can only snoop. */
621 if (mad_agent->hi_tid) {
622 mad_agent_priv = container_of(mad_agent,
623 struct ib_mad_agent_private,
624 agent);
625 unregister_mad_agent(mad_agent_priv);
626 } else {
627 mad_snoop_priv = container_of(mad_agent,
628 struct ib_mad_snoop_private,
629 agent);
630 unregister_mad_snoop(mad_snoop_priv);
631 }
632 return 0;
633}
634EXPORT_SYMBOL(ib_unregister_mad_agent);
635
636static void dequeue_mad(struct ib_mad_list_head *mad_list)
637{
638 struct ib_mad_queue *mad_queue;
639 unsigned long flags;
640
641 BUG_ON(!mad_list->mad_queue);
642 mad_queue = mad_list->mad_queue;
643 spin_lock_irqsave(&mad_queue->lock, flags);
644 list_del(&mad_list->list);
645 mad_queue->count--;
646 spin_unlock_irqrestore(&mad_queue->lock, flags);
647}
648
649static void snoop_send(struct ib_mad_qp_info *qp_info,
Sean Hefty34816ad2005-10-25 10:51:39 -0700650 struct ib_mad_send_buf *send_buf,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 struct ib_mad_send_wc *mad_send_wc,
652 int mad_snoop_flags)
653{
654 struct ib_mad_snoop_private *mad_snoop_priv;
655 unsigned long flags;
656 int i;
657
658 spin_lock_irqsave(&qp_info->snoop_lock, flags);
659 for (i = 0; i < qp_info->snoop_table_size; i++) {
660 mad_snoop_priv = qp_info->snoop_table[i];
661 if (!mad_snoop_priv ||
662 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
663 continue;
664
665 atomic_inc(&mad_snoop_priv->refcount);
666 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
667 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
Sean Hefty34816ad2005-10-25 10:51:39 -0700668 send_buf, mad_send_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -0700669 deref_snoop_agent(mad_snoop_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 spin_lock_irqsave(&qp_info->snoop_lock, flags);
671 }
672 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
673}
674
675static void snoop_recv(struct ib_mad_qp_info *qp_info,
676 struct ib_mad_recv_wc *mad_recv_wc,
677 int mad_snoop_flags)
678{
679 struct ib_mad_snoop_private *mad_snoop_priv;
680 unsigned long flags;
681 int i;
682
683 spin_lock_irqsave(&qp_info->snoop_lock, flags);
684 for (i = 0; i < qp_info->snoop_table_size; i++) {
685 mad_snoop_priv = qp_info->snoop_table[i];
686 if (!mad_snoop_priv ||
687 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
688 continue;
689
690 atomic_inc(&mad_snoop_priv->refcount);
691 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
692 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
693 mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -0700694 deref_snoop_agent(mad_snoop_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 spin_lock_irqsave(&qp_info->snoop_lock, flags);
696 }
697 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
698}
699
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200700static void build_smp_wc(struct ib_qp *qp,
701 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 struct ib_wc *wc)
703{
704 memset(wc, 0, sizeof *wc);
705 wc->wr_id = wr_id;
706 wc->status = IB_WC_SUCCESS;
707 wc->opcode = IB_WC_RECV;
708 wc->pkey_index = pkey_index;
709 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
710 wc->src_qp = IB_QP0;
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200711 wc->qp = qp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712 wc->slid = slid;
713 wc->sl = 0;
714 wc->dlid_path_bits = 0;
715 wc->port_num = port_num;
716}
717
Ira Weinyc9082e52015-06-06 14:38:30 -0400718static size_t mad_priv_size(const struct ib_mad_private *mp)
719{
720 return sizeof(struct ib_mad_private) + mp->mad_size;
721}
722
723static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
724{
725 size_t size = sizeof(struct ib_mad_private) + mad_size;
726 struct ib_mad_private *ret = kzalloc(size, flags);
727
728 if (ret)
729 ret->mad_size = mad_size;
730
731 return ret;
732}
733
734static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
735{
736 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
737}
738
739static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
740{
741 return sizeof(struct ib_grh) + mp->mad_size;
742}
743
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744/*
745 * Return 0 if SMP is to be sent
746 * Return 1 if SMP was consumed locally (whether or not solicited)
747 * Return < 0 if error
748 */
749static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
Sean Hefty34816ad2005-10-25 10:51:39 -0700750 struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751{
Hal Rosenstockde493d42007-04-02 11:24:07 -0400752 int ret = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -0700753 struct ib_smp *smp = mad_send_wr->send_buf.mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 unsigned long flags;
755 struct ib_mad_local_private *local;
756 struct ib_mad_private *mad_priv;
757 struct ib_mad_port_private *port_priv;
758 struct ib_mad_agent_private *recv_mad_agent = NULL;
759 struct ib_device *device = mad_agent_priv->agent.device;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -0400760 u8 port_num;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 struct ib_wc mad_wc;
Sean Hefty34816ad2005-10-25 10:51:39 -0700762 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
Ira Weinyc9082e52015-06-06 14:38:30 -0400763 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
Ira Weiny4cd7c942015-06-06 14:38:31 -0400764 u16 out_mad_pkey_index = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765
Hal Rosenstock1bae4db2007-05-14 17:21:52 -0400766 if (device->node_type == RDMA_NODE_IB_SWITCH &&
767 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
768 port_num = send_wr->wr.ud.port_num;
769 else
770 port_num = mad_agent_priv->agent.port_num;
771
Ralph Campbell8cf3f042006-02-03 14:28:48 -0800772 /*
773 * Directed route handling starts if the initial LID routed part of
774 * a request or the ending LID routed part of a response is empty.
775 * If we are at the start of the LID routed part, don't update the
776 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
777 */
778 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
779 IB_LID_PERMISSIVE &&
Hal Rosenstockde493d42007-04-02 11:24:07 -0400780 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
781 IB_SMI_DISCARD) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 ret = -EINVAL;
Ira Weiny7ef5d4b2014-08-08 19:00:53 -0400783 dev_err(&device->dev, "Invalid directed route\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700784 goto out;
785 }
Hal Rosenstockde493d42007-04-02 11:24:07 -0400786
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 /* Check to post send on QP or process locally */
Steve Welch727792d2007-10-23 15:06:10 -0700788 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
789 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790 goto out;
791
792 local = kmalloc(sizeof *local, GFP_ATOMIC);
793 if (!local) {
794 ret = -ENOMEM;
Ira Weiny7ef5d4b2014-08-08 19:00:53 -0400795 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 goto out;
797 }
798 local->mad_priv = NULL;
799 local->recv_mad_agent = NULL;
Ira Weinyc9082e52015-06-06 14:38:30 -0400800 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 if (!mad_priv) {
802 ret = -ENOMEM;
Ira Weiny7ef5d4b2014-08-08 19:00:53 -0400803 dev_err(&device->dev, "No memory for local response MAD\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 kfree(local);
805 goto out;
806 }
807
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200808 build_smp_wc(mad_agent_priv->agent.qp,
809 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
Sean Hefty97f52eb2005-08-13 21:05:57 -0700810 send_wr->wr.ud.pkey_index,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 send_wr->wr.ud.port_num, &mad_wc);
812
813 /* No GRH for DR SMP */
814 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
Ira Weiny4cd7c942015-06-06 14:38:31 -0400815 (const struct ib_mad_hdr *)smp, mad_size,
816 (struct ib_mad_hdr *)mad_priv->mad,
817 &mad_size, &out_mad_pkey_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818 switch (ret)
819 {
820 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
Ira Weinyc9082e52015-06-06 14:38:30 -0400821 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 mad_agent_priv->agent.recv_handler) {
823 local->mad_priv = mad_priv;
824 local->recv_mad_agent = mad_agent_priv;
825 /*
826 * Reference MAD agent until receive
827 * side of local completion handled
828 */
829 atomic_inc(&mad_agent_priv->refcount);
830 } else
Ira Weinyc9082e52015-06-06 14:38:30 -0400831 kfree(mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 break;
833 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
Ira Weinyc9082e52015-06-06 14:38:30 -0400834 kfree(mad_priv);
Ralph Campbell4780c192009-03-03 14:22:17 -0800835 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836 case IB_MAD_RESULT_SUCCESS:
837 /* Treat like an incoming receive MAD */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
839 mad_agent_priv->agent.port_num);
840 if (port_priv) {
Ira Weinyc9082e52015-06-06 14:38:30 -0400841 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700842 recv_mad_agent = find_mad_agent(port_priv,
Ira Weinyc9082e52015-06-06 14:38:30 -0400843 (const struct ib_mad_hdr *)mad_priv->mad);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 }
845 if (!port_priv || !recv_mad_agent) {
Ralph Campbell4780c192009-03-03 14:22:17 -0800846 /*
847 * No receiving agent so drop packet and
848 * generate send completion.
849 */
Ira Weinyc9082e52015-06-06 14:38:30 -0400850 kfree(mad_priv);
Ralph Campbell4780c192009-03-03 14:22:17 -0800851 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 }
853 local->mad_priv = mad_priv;
854 local->recv_mad_agent = recv_mad_agent;
855 break;
856 default:
Ira Weinyc9082e52015-06-06 14:38:30 -0400857 kfree(mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 kfree(local);
859 ret = -EINVAL;
860 goto out;
861 }
862
Sean Hefty34816ad2005-10-25 10:51:39 -0700863 local->mad_send_wr = mad_send_wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 /* Reference MAD agent until send side of local completion handled */
865 atomic_inc(&mad_agent_priv->refcount);
866 /* Queue local completion to local list */
867 spin_lock_irqsave(&mad_agent_priv->lock, flags);
868 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
869 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
870 queue_work(mad_agent_priv->qp_info->port_priv->wq,
Hal Rosenstockb82cab62005-07-27 11:45:22 -0700871 &mad_agent_priv->local_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872 ret = 1;
873out:
874 return ret;
875}
876
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800877static int get_pad_size(int hdr_len, int data_len)
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700878{
879 int seg_size, pad;
880
881 seg_size = sizeof(struct ib_mad) - hdr_len;
882 if (data_len && seg_size) {
883 pad = seg_size - data_len % seg_size;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800884 return pad == seg_size ? 0 : pad;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700885 } else
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800886 return seg_size;
887}
888
889static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
890{
891 struct ib_rmpp_segment *s, *t;
892
893 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
894 list_del(&s->list);
895 kfree(s);
896 }
897}
898
899static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
900 gfp_t gfp_mask)
901{
902 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
903 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
904 struct ib_rmpp_segment *seg = NULL;
905 int left, seg_size, pad;
906
Ira Weinyc9082e52015-06-06 14:38:30 -0400907 send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800908 seg_size = send_buf->seg_size;
909 pad = send_wr->pad;
910
911 /* Allocate data segments. */
912 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
913 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
914 if (!seg) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -0400915 dev_err(&send_buf->mad_agent->device->dev,
916 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
917 sizeof (*seg) + seg_size, gfp_mask);
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800918 free_send_rmpp_list(send_wr);
919 return -ENOMEM;
920 }
921 seg->num = ++send_buf->seg_count;
922 list_add_tail(&seg->list, &send_wr->rmpp_list);
923 }
924
925 /* Zero any padding */
926 if (pad)
927 memset(seg->data + seg_size - pad, 0, pad);
928
929 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
930 agent.rmpp_version;
931 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
932 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
933
934 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
935 struct ib_rmpp_segment, list);
936 send_wr->last_ack_seg = send_wr->cur_seg;
937 return 0;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700938}
939
Ira Weinyf766c582015-05-08 14:27:24 -0400940int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
Ira Weiny1471cb62014-08-08 19:00:56 -0400941{
942 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
943}
944EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
945
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700946struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
947 u32 remote_qpn, u16 pkey_index,
Sean Hefty34816ad2005-10-25 10:51:39 -0700948 int rmpp_active,
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700949 int hdr_len, int data_len,
Ira Weinyda2dfaa2015-06-06 14:38:28 -0400950 gfp_t gfp_mask,
951 u8 base_version)
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700952{
953 struct ib_mad_agent_private *mad_agent_priv;
Sean Hefty34816ad2005-10-25 10:51:39 -0700954 struct ib_mad_send_wr_private *mad_send_wr;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800955 int pad, message_size, ret, size;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700956 void *buf;
957
Sean Hefty34816ad2005-10-25 10:51:39 -0700958 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
959 agent);
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800960 pad = get_pad_size(hdr_len, data_len);
961 message_size = hdr_len + data_len + pad;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700962
Ira Weiny1471cb62014-08-08 19:00:56 -0400963 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
964 if (!rmpp_active && message_size > sizeof(struct ib_mad))
965 return ERR_PTR(-EINVAL);
966 } else
967 if (rmpp_active || message_size > sizeof(struct ib_mad))
968 return ERR_PTR(-EINVAL);
Hal Rosenstockfa619a72005-07-27 11:45:37 -0700969
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800970 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
971 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700972 if (!buf)
973 return ERR_PTR(-ENOMEM);
974
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800975 mad_send_wr = buf + size;
976 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
Sean Hefty34816ad2005-10-25 10:51:39 -0700977 mad_send_wr->send_buf.mad = buf;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800978 mad_send_wr->send_buf.hdr_len = hdr_len;
979 mad_send_wr->send_buf.data_len = data_len;
980 mad_send_wr->pad = pad;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700981
Sean Hefty34816ad2005-10-25 10:51:39 -0700982 mad_send_wr->mad_agent_priv = mad_agent_priv;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800983 mad_send_wr->sg_list[0].length = hdr_len;
Sean Hefty34816ad2005-10-25 10:51:39 -0700984 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800985 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
986 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -0700987
Sean Hefty34816ad2005-10-25 10:51:39 -0700988 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
989 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800990 mad_send_wr->send_wr.num_sge = 2;
Sean Hefty34816ad2005-10-25 10:51:39 -0700991 mad_send_wr->send_wr.opcode = IB_WR_SEND;
992 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
993 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
994 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
995 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
Hal Rosenstockfa619a72005-07-27 11:45:37 -0700996
997 if (rmpp_active) {
Jack Morgensteinf36e1792006-03-03 21:54:13 -0800998 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
999 if (ret) {
1000 kfree(buf);
1001 return ERR_PTR(ret);
1002 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001003 }
1004
Sean Hefty34816ad2005-10-25 10:51:39 -07001005 mad_send_wr->send_buf.mad_agent = mad_agent;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -07001006 atomic_inc(&mad_agent_priv->refcount);
Sean Hefty34816ad2005-10-25 10:51:39 -07001007 return &mad_send_wr->send_buf;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -07001008}
1009EXPORT_SYMBOL(ib_create_send_mad);
1010
Hal Rosenstock618a3c02006-03-28 16:40:04 -08001011int ib_get_mad_data_offset(u8 mgmt_class)
1012{
1013 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1014 return IB_MGMT_SA_HDR;
1015 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1016 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1017 (mgmt_class == IB_MGMT_CLASS_BIS))
1018 return IB_MGMT_DEVICE_HDR;
1019 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1020 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1021 return IB_MGMT_VENDOR_HDR;
1022 else
1023 return IB_MGMT_MAD_HDR;
1024}
1025EXPORT_SYMBOL(ib_get_mad_data_offset);
1026
1027int ib_is_mad_class_rmpp(u8 mgmt_class)
1028{
1029 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1030 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1031 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1032 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1033 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1034 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1035 return 1;
1036 return 0;
1037}
1038EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1039
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001040void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1041{
1042 struct ib_mad_send_wr_private *mad_send_wr;
1043 struct list_head *list;
1044
1045 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1046 send_buf);
1047 list = &mad_send_wr->cur_seg->list;
1048
1049 if (mad_send_wr->cur_seg->num < seg_num) {
1050 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1051 if (mad_send_wr->cur_seg->num == seg_num)
1052 break;
1053 } else if (mad_send_wr->cur_seg->num > seg_num) {
1054 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1055 if (mad_send_wr->cur_seg->num == seg_num)
1056 break;
1057 }
1058 return mad_send_wr->cur_seg->data;
1059}
1060EXPORT_SYMBOL(ib_get_rmpp_segment);
1061
1062static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1063{
1064 if (mad_send_wr->send_buf.seg_count)
1065 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1066 mad_send_wr->seg_num);
1067 else
1068 return mad_send_wr->send_buf.mad +
1069 mad_send_wr->send_buf.hdr_len;
1070}
1071
Hal Rosenstock824c8ae2005-07-27 11:45:23 -07001072void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1073{
1074 struct ib_mad_agent_private *mad_agent_priv;
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001075 struct ib_mad_send_wr_private *mad_send_wr;
Hal Rosenstock824c8ae2005-07-27 11:45:23 -07001076
1077 mad_agent_priv = container_of(send_buf->mad_agent,
1078 struct ib_mad_agent_private, agent);
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001079 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1080 send_buf);
Hal Rosenstock824c8ae2005-07-27 11:45:23 -07001081
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001082 free_send_rmpp_list(mad_send_wr);
1083 kfree(send_buf->mad);
Sean Hefty1b52fa982006-05-12 14:57:52 -07001084 deref_mad_agent(mad_agent_priv);
Hal Rosenstock824c8ae2005-07-27 11:45:23 -07001085}
1086EXPORT_SYMBOL(ib_free_send_mad);
1087
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001088int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089{
1090 struct ib_mad_qp_info *qp_info;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001091 struct list_head *list;
Sean Hefty34816ad2005-10-25 10:51:39 -07001092 struct ib_send_wr *bad_send_wr;
1093 struct ib_mad_agent *mad_agent;
1094 struct ib_sge *sge;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 unsigned long flags;
1096 int ret;
1097
Hal Rosenstockf8197a42005-07-27 11:45:24 -07001098 /* Set WR ID to find mad_send_wr upon completion */
Hal Rosenstockd760ce82005-07-27 11:45:25 -07001099 qp_info = mad_send_wr->mad_agent_priv->qp_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1101 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1102
Sean Hefty34816ad2005-10-25 10:51:39 -07001103 mad_agent = mad_send_wr->send_buf.mad_agent;
1104 sge = mad_send_wr->sg_list;
Ralph Campbell15271062006-12-12 14:28:30 -08001105 sge[0].addr = ib_dma_map_single(mad_agent->device,
1106 mad_send_wr->send_buf.mad,
1107 sge[0].length,
1108 DMA_TO_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02001109 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1110 return -ENOMEM;
1111
Ralph Campbell15271062006-12-12 14:28:30 -08001112 mad_send_wr->header_mapping = sge[0].addr;
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001113
Ralph Campbell15271062006-12-12 14:28:30 -08001114 sge[1].addr = ib_dma_map_single(mad_agent->device,
1115 ib_get_payload(mad_send_wr),
1116 sge[1].length,
1117 DMA_TO_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02001118 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1119 ib_dma_unmap_single(mad_agent->device,
1120 mad_send_wr->header_mapping,
1121 sge[0].length, DMA_TO_DEVICE);
1122 return -ENOMEM;
1123 }
Ralph Campbell15271062006-12-12 14:28:30 -08001124 mad_send_wr->payload_mapping = sge[1].addr;
Sean Hefty34816ad2005-10-25 10:51:39 -07001125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001127 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
Sean Hefty34816ad2005-10-25 10:51:39 -07001128 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1129 &bad_send_wr);
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001130 list = &qp_info->send_queue.list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 ret = 0;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001133 list = &qp_info->overflow_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 }
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001135
1136 if (!ret) {
1137 qp_info->send_queue.count++;
1138 list_add_tail(&mad_send_wr->mad_list.list, list);
1139 }
1140 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001141 if (ret) {
Ralph Campbell15271062006-12-12 14:28:30 -08001142 ib_dma_unmap_single(mad_agent->device,
1143 mad_send_wr->header_mapping,
1144 sge[0].length, DMA_TO_DEVICE);
1145 ib_dma_unmap_single(mad_agent->device,
1146 mad_send_wr->payload_mapping,
1147 sge[1].length, DMA_TO_DEVICE);
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001148 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 return ret;
1150}
1151
1152/*
1153 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1154 * with the registered client
1155 */
Sean Hefty34816ad2005-10-25 10:51:39 -07001156int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1157 struct ib_mad_send_buf **bad_send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 struct ib_mad_agent_private *mad_agent_priv;
Sean Hefty34816ad2005-10-25 10:51:39 -07001160 struct ib_mad_send_buf *next_send_buf;
1161 struct ib_mad_send_wr_private *mad_send_wr;
1162 unsigned long flags;
1163 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164
1165 /* Walk list of send WRs and post each on send list */
Sean Hefty34816ad2005-10-25 10:51:39 -07001166 for (; send_buf; send_buf = next_send_buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
Sean Hefty34816ad2005-10-25 10:51:39 -07001168 mad_send_wr = container_of(send_buf,
1169 struct ib_mad_send_wr_private,
1170 send_buf);
1171 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172
Sean Hefty34816ad2005-10-25 10:51:39 -07001173 if (!send_buf->mad_agent->send_handler ||
1174 (send_buf->timeout_ms &&
1175 !send_buf->mad_agent->recv_handler)) {
1176 ret = -EINVAL;
1177 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 }
1179
Hal Rosenstock618a3c02006-03-28 16:40:04 -08001180 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1181 if (mad_agent_priv->agent.rmpp_version) {
1182 ret = -EINVAL;
1183 goto error;
1184 }
1185 }
1186
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 /*
1188 * Save pointer to next work request to post in case the
1189 * current one completes, and the user modifies the work
1190 * request associated with the completion
1191 */
Sean Hefty34816ad2005-10-25 10:51:39 -07001192 next_send_buf = send_buf->next;
1193 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
Sean Hefty34816ad2005-10-25 10:51:39 -07001195 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1196 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1197 ret = handle_outgoing_dr_smp(mad_agent_priv,
1198 mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 if (ret < 0) /* error */
Sean Hefty34816ad2005-10-25 10:51:39 -07001200 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 else if (ret == 1) /* locally consumed */
Sean Hefty34816ad2005-10-25 10:51:39 -07001202 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 }
1204
Sean Hefty34816ad2005-10-25 10:51:39 -07001205 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 /* Timeout will be updated after send completes */
Sean Hefty34816ad2005-10-25 10:51:39 -07001207 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
Sean Hefty4fc8cd42007-11-27 00:11:04 -08001208 mad_send_wr->max_retries = send_buf->retries;
1209 mad_send_wr->retries_left = send_buf->retries;
1210 send_buf->retries = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07001211 /* Reference for work request to QP + response */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1213 mad_send_wr->status = IB_WC_SUCCESS;
1214
1215 /* Reference MAD agent until send completes */
1216 atomic_inc(&mad_agent_priv->refcount);
1217 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1218 list_add_tail(&mad_send_wr->agent_list,
1219 &mad_agent_priv->send_list);
1220 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1221
Ira Weiny1471cb62014-08-08 19:00:56 -04001222 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001223 ret = ib_send_rmpp_mad(mad_send_wr);
1224 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1225 ret = ib_send_mad(mad_send_wr);
1226 } else
1227 ret = ib_send_mad(mad_send_wr);
1228 if (ret < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 /* Fail send request */
1230 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1231 list_del(&mad_send_wr->agent_list);
1232 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1233 atomic_dec(&mad_agent_priv->refcount);
Sean Hefty34816ad2005-10-25 10:51:39 -07001234 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 }
1237 return 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07001238error:
1239 if (bad_send_buf)
1240 *bad_send_buf = send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 return ret;
1242}
1243EXPORT_SYMBOL(ib_post_send_mad);
1244
1245/*
1246 * ib_free_recv_mad - Returns data buffers used to receive
1247 * a MAD to the access layer
1248 */
1249void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1250{
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001251 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 struct ib_mad_private_header *mad_priv_hdr;
1253 struct ib_mad_private *priv;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001254 struct list_head free_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001256 INIT_LIST_HEAD(&free_list);
1257 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001259 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1260 &free_list, list) {
1261 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1262 recv_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 mad_priv_hdr = container_of(mad_recv_wc,
1264 struct ib_mad_private_header,
1265 recv_wc);
1266 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1267 header);
Ira Weinyc9082e52015-06-06 14:38:30 -04001268 kfree(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270}
1271EXPORT_SYMBOL(ib_free_recv_mad);
1272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1274 u8 rmpp_version,
1275 ib_mad_send_handler send_handler,
1276 ib_mad_recv_handler recv_handler,
1277 void *context)
1278{
1279 return ERR_PTR(-EINVAL); /* XXX: for now */
1280}
1281EXPORT_SYMBOL(ib_redirect_mad_qp);
1282
1283int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1284 struct ib_wc *wc)
1285{
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001286 dev_err(&mad_agent->device->dev,
1287 "ib_process_mad_wc() not implemented yet\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 return 0;
1289}
1290EXPORT_SYMBOL(ib_process_mad_wc);
1291
1292static int method_in_use(struct ib_mad_mgmt_method_table **method,
1293 struct ib_mad_reg_req *mad_reg_req)
1294{
1295 int i;
1296
Akinobu Mita19b629f2010-03-05 13:41:38 -08001297 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 if ((*method)->agent[i]) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001299 pr_err("Method %d already in use\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 return -EINVAL;
1301 }
1302 }
1303 return 0;
1304}
1305
1306static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1307{
1308 /* Allocate management method table */
Roland Dreierde6eb662005-11-02 07:23:14 -08001309 *method = kzalloc(sizeof **method, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 if (!*method) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001311 pr_err("No memory for ib_mad_mgmt_method_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 return -ENOMEM;
1313 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
1315 return 0;
1316}
1317
1318/*
1319 * Check to see if there are any methods still in use
1320 */
1321static int check_method_table(struct ib_mad_mgmt_method_table *method)
1322{
1323 int i;
1324
1325 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1326 if (method->agent[i])
1327 return 1;
1328 return 0;
1329}
1330
1331/*
1332 * Check to see if there are any method tables for this class still in use
1333 */
1334static int check_class_table(struct ib_mad_mgmt_class_table *class)
1335{
1336 int i;
1337
1338 for (i = 0; i < MAX_MGMT_CLASS; i++)
1339 if (class->method_table[i])
1340 return 1;
1341 return 0;
1342}
1343
1344static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1345{
1346 int i;
1347
1348 for (i = 0; i < MAX_MGMT_OUI; i++)
1349 if (vendor_class->method_table[i])
1350 return 1;
1351 return 0;
1352}
1353
1354static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
Ira Weinyd94bd262015-06-06 14:38:22 -04001355 const char *oui)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356{
1357 int i;
1358
1359 for (i = 0; i < MAX_MGMT_OUI; i++)
Roland Dreier3cd96562006-09-22 15:22:46 -07001360		/* Is there a matching OUI for this vendor class? */
1361 if (!memcmp(vendor_class->oui[i], oui, 3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 return i;
1363
1364 return -1;
1365}
1366
1367static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1368{
1369 int i;
1370
1371 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1372 if (vendor->vendor_class[i])
1373 return 1;
1374
1375 return 0;
1376}
1377
1378static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1379 struct ib_mad_agent_private *agent)
1380{
1381 int i;
1382
1383 /* Remove any methods for this mad agent */
1384 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1385 if (method->agent[i] == agent) {
1386 method->agent[i] = NULL;
1387 }
1388 }
1389}
1390
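/*
 * Register a "traditional" (non-vendor-OUI) management class: find or
 * allocate the class and method tables for this class version, then
 * claim the requested methods for this agent.
 */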
1391static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1392 struct ib_mad_agent_private *agent_priv,
1393 u8 mgmt_class)
1394{
1395 struct ib_mad_port_private *port_priv;
1396 struct ib_mad_mgmt_class_table **class;
1397 struct ib_mad_mgmt_method_table **method;
1398 int i, ret;
1399
1400 port_priv = agent_priv->qp_info->port_priv;
1401 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1402 if (!*class) {
1403 /* Allocate management class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001404 *class = kzalloc(sizeof **class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 if (!*class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001406 dev_err(&agent_priv->agent.device->dev,
1407 "No memory for ib_mad_mgmt_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 ret = -ENOMEM;
1409 goto error1;
1410 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001411
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 /* Allocate method table for this management class */
1413 method = &(*class)->method_table[mgmt_class];
1414 if ((ret = allocate_method_table(method)))
1415 goto error2;
1416 } else {
1417 method = &(*class)->method_table[mgmt_class];
1418 if (!*method) {
1419 /* Allocate method table for this management class */
1420 if ((ret = allocate_method_table(method)))
1421 goto error1;
1422 }
1423 }
1424
1425 /* Now, make sure methods are not already in use */
1426 if (method_in_use(method, mad_reg_req))
1427 goto error3;
1428
1429 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001430 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001432
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 return 0;
1434
1435error3:
1436 /* Remove any methods for this mad agent */
1437 remove_methods_mad_agent(*method, agent_priv);
1438 /* Now, check to see if there are any methods in use */
1439 if (!check_method_table(*method)) {
1440 /* If not, release management method table */
1441 kfree(*method);
1442 *method = NULL;
1443 }
1444 ret = -EINVAL;
1445 goto error1;
1446error2:
1447 kfree(*class);
1448 *class = NULL;
1449error1:
1450 return ret;
1451}
1452
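/*
 * Register a vendor class from vendor range 2 (classes carrying an OUI):
 * find or allocate the vendor class table, locate a matching or free OUI
 * slot, then claim the requested methods for this agent.
 */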
1453static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1454 struct ib_mad_agent_private *agent_priv)
1455{
1456 struct ib_mad_port_private *port_priv;
1457 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1458 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1459 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1460 struct ib_mad_mgmt_method_table **method;
1461 int i, ret = -ENOMEM;
1462 u8 vclass;
1463
1464 /* "New" vendor (with OUI) class */
1465 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1466 port_priv = agent_priv->qp_info->port_priv;
1467 vendor_table = &port_priv->version[
1468 mad_reg_req->mgmt_class_version].vendor;
1469 if (!*vendor_table) {
1470 /* Allocate mgmt vendor class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001471 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 if (!vendor) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001473 dev_err(&agent_priv->agent.device->dev,
1474 "No memory for ib_mad_mgmt_vendor_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 goto error1;
1476 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001477
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 *vendor_table = vendor;
1479 }
1480 if (!(*vendor_table)->vendor_class[vclass]) {
1481 /* Allocate table for this management vendor class */
Roland Dreierde6eb662005-11-02 07:23:14 -08001482 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 if (!vendor_class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001484 dev_err(&agent_priv->agent.device->dev,
1485 "No memory for ib_mad_mgmt_vendor_class\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 goto error2;
1487 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001488
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 (*vendor_table)->vendor_class[vclass] = vendor_class;
1490 }
1491 for (i = 0; i < MAX_MGMT_OUI; i++) {
1492		/* Is there a matching OUI for this vendor class? */
1493 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1494 mad_reg_req->oui, 3)) {
1495 method = &(*vendor_table)->vendor_class[
1496 vclass]->method_table[i];
1497 BUG_ON(!*method);
1498 goto check_in_use;
1499 }
1500 }
1501 for (i = 0; i < MAX_MGMT_OUI; i++) {
1502		/* OUI slot available? */
1503 if (!is_vendor_oui((*vendor_table)->vendor_class[
1504 vclass]->oui[i])) {
1505 method = &(*vendor_table)->vendor_class[
1506 vclass]->method_table[i];
1507 BUG_ON(*method);
1508 /* Allocate method table for this OUI */
1509 if ((ret = allocate_method_table(method)))
1510 goto error3;
1511 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1512 mad_reg_req->oui, 3);
1513 goto check_in_use;
1514 }
1515 }
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001516 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 goto error3;
1518
1519check_in_use:
1520 /* Now, make sure methods are not already in use */
1521 if (method_in_use(method, mad_reg_req))
1522 goto error4;
1523
1524 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001525 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 return 0;
1529
1530error4:
1531 /* Remove any methods for this mad agent */
1532 remove_methods_mad_agent(*method, agent_priv);
1533 /* Now, check to see if there are any methods in use */
1534 if (!check_method_table(*method)) {
1535 /* If not, release management method table */
1536 kfree(*method);
1537 *method = NULL;
1538 }
1539 ret = -EINVAL;
1540error3:
1541 if (vendor_class) {
1542 (*vendor_table)->vendor_class[vclass] = NULL;
1543 kfree(vendor_class);
1544 }
1545error2:
1546 if (vendor) {
1547 *vendor_table = NULL;
1548 kfree(vendor);
1549 }
1550error1:
1551 return ret;
1552}
1553
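/*
 * Undo a MAD registration: drop this agent's methods from the class or
 * vendor tables and free any tables that become empty as a result.
 */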
1554static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1555{
1556 struct ib_mad_port_private *port_priv;
1557 struct ib_mad_mgmt_class_table *class;
1558 struct ib_mad_mgmt_method_table *method;
1559 struct ib_mad_mgmt_vendor_class_table *vendor;
1560 struct ib_mad_mgmt_vendor_class *vendor_class;
1561 int index;
1562 u8 mgmt_class;
1563
1564 /*
1565	 * Was a MAD registration request supplied
1566	 * with the original registration?
1567 */
1568 if (!agent_priv->reg_req) {
1569 goto out;
1570 }
1571
1572 port_priv = agent_priv->qp_info->port_priv;
1573 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1574 class = port_priv->version[
1575 agent_priv->reg_req->mgmt_class_version].class;
1576 if (!class)
1577 goto vendor_check;
1578
1579 method = class->method_table[mgmt_class];
1580 if (method) {
1581 /* Remove any methods for this mad agent */
1582 remove_methods_mad_agent(method, agent_priv);
1583 /* Now, check to see if there are any methods still in use */
1584 if (!check_method_table(method)) {
1585 /* If not, release management method table */
1586 kfree(method);
1587 class->method_table[mgmt_class] = NULL;
1588			/* Any management classes left? */
1589 if (!check_class_table(class)) {
1590 /* If not, release management class table */
1591 kfree(class);
1592 port_priv->version[
1593 agent_priv->reg_req->
1594 mgmt_class_version].class = NULL;
1595 }
1596 }
1597 }
1598
1599vendor_check:
1600 if (!is_vendor_class(mgmt_class))
1601 goto out;
1602
1603 /* normalize mgmt_class to vendor range 2 */
1604 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1605 vendor = port_priv->version[
1606 agent_priv->reg_req->mgmt_class_version].vendor;
1607
1608 if (!vendor)
1609 goto out;
1610
1611 vendor_class = vendor->vendor_class[mgmt_class];
1612 if (vendor_class) {
1613 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1614 if (index < 0)
1615 goto out;
1616 method = vendor_class->method_table[index];
1617 if (method) {
1618 /* Remove any methods for this mad agent */
1619 remove_methods_mad_agent(method, agent_priv);
1620 /*
1621 * Now, check to see if there are
1622 * any methods still in use
1623 */
1624 if (!check_method_table(method)) {
1625 /* If not, release management method table */
1626 kfree(method);
1627 vendor_class->method_table[index] = NULL;
1628 memset(vendor_class->oui[index], 0, 3);
1629				/* Any OUIs left? */
1630 if (!check_vendor_class(vendor_class)) {
1631 /* If not, release vendor class table */
1632 kfree(vendor_class);
1633 vendor->vendor_class[mgmt_class] = NULL;
1634					/* Any other vendor classes left? */
1635 if (!check_vendor_table(vendor)) {
1636 kfree(vendor);
1637 port_priv->version[
1638 agent_priv->reg_req->
1639 mgmt_class_version].
1640 vendor = NULL;
1641 }
1642 }
1643 }
1644 }
1645 }
1646
1647out:
1648 return;
1649}
1650
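/*
 * Locate the agent that should receive an incoming MAD: responses are
 * routed by the high 32 bits of the TID, requests by class version,
 * management class, method and (for vendor range 2 classes) OUI.
 * The returned agent has its refcount incremented.
 */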
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651static struct ib_mad_agent_private *
1652find_mad_agent(struct ib_mad_port_private *port_priv,
Ira Weinyd94bd262015-06-06 14:38:22 -04001653 const struct ib_mad_hdr *mad_hdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654{
1655 struct ib_mad_agent_private *mad_agent = NULL;
1656 unsigned long flags;
1657
1658 spin_lock_irqsave(&port_priv->reg_lock, flags);
Ira Weinyd94bd262015-06-06 14:38:22 -04001659 if (ib_response_mad(mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 u32 hi_tid;
1661 struct ib_mad_agent_private *entry;
1662
1663 /*
1664		 * Routing is based on the high 32 bits of
1665		 * the MAD's transaction ID.
1666 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001667 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
Sean Hefty34816ad2005-10-25 10:51:39 -07001668 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 if (entry->agent.hi_tid == hi_tid) {
1670 mad_agent = entry;
1671 break;
1672 }
1673 }
1674 } else {
1675 struct ib_mad_mgmt_class_table *class;
1676 struct ib_mad_mgmt_method_table *method;
1677 struct ib_mad_mgmt_vendor_class_table *vendor;
1678 struct ib_mad_mgmt_vendor_class *vendor_class;
Ira Weinyd94bd262015-06-06 14:38:22 -04001679 const struct ib_vendor_mad *vendor_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 int index;
1681
1682 /*
1683 * Routing is based on version, class, and method
1684 * For "newer" vendor MADs, also based on OUI
1685 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001686 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001688 if (!is_vendor_class(mad_hdr->mgmt_class)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 class = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001690 mad_hdr->class_version].class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 if (!class)
1692 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001693 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
Hefty, Seanb7ab0b12011-10-06 09:33:05 -07001694 IB_MGMT_MAX_METHODS)
1695 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 method = class->method_table[convert_mgmt_class(
Ira Weinyd94bd262015-06-06 14:38:22 -04001697 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 if (method)
Ira Weinyd94bd262015-06-06 14:38:22 -04001699 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 ~IB_MGMT_METHOD_RESP];
1701 } else {
1702 vendor = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001703 mad_hdr->class_version].vendor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 if (!vendor)
1705 goto out;
1706 vendor_class = vendor->vendor_class[vendor_class_index(
Ira Weinyd94bd262015-06-06 14:38:22 -04001707 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 if (!vendor_class)
1709 goto out;
1710 /* Find matching OUI */
Ira Weinyd94bd262015-06-06 14:38:22 -04001711 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1713 if (index == -1)
1714 goto out;
1715 method = vendor_class->method_table[index];
1716 if (method) {
Ira Weinyd94bd262015-06-06 14:38:22 -04001717 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 ~IB_MGMT_METHOD_RESP];
1719 }
1720 }
1721 }
1722
1723 if (mad_agent) {
1724 if (mad_agent->agent.recv_handler)
1725 atomic_inc(&mad_agent->refcount);
1726 else {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001727 dev_notice(&port_priv->device->dev,
1728 "No receive handler for client %p on port %d\n",
1729 &mad_agent->agent, port_priv->port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 mad_agent = NULL;
1731 }
1732 }
1733out:
1734 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1735
1736 return mad_agent;
1737}
1738
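/*
 * Basic sanity checks on a received MAD: the base version must be
 * supported, SMI classes are accepted only on QP0, and all other
 * classes only on QP1 (the GSI QP).
 */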
Ira Weiny77f60832015-05-08 14:27:21 -04001739static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740{
1741 int valid = 0;
1742
1743 /* Make sure MAD base version is understood */
Ira Weiny77f60832015-05-08 14:27:21 -04001744 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001745 pr_err("MAD received with unsupported base version %d\n",
Ira Weiny77f60832015-05-08 14:27:21 -04001746 mad_hdr->base_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 goto out;
1748 }
1749
1750 /* Filter SMI packets sent to other than QP0 */
Ira Weiny77f60832015-05-08 14:27:21 -04001751 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1752 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 if (qp_num == 0)
1754 valid = 1;
1755 } else {
1756 /* Filter GSI packets sent to QP0 */
1757 if (qp_num != 0)
1758 valid = 1;
1759 }
1760
1761out:
1762 return valid;
1763}
1764
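/*
 * True when the MAD should be matched like ordinary data: the agent
 * does not use kernel RMPP, the RMPP active flag is not set, or the
 * segment is an RMPP DATA segment.
 */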
Ira Weinyf766c582015-05-08 14:27:24 -04001765static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1766 const struct ib_mad_hdr *mad_hdr)
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001767{
1768 struct ib_rmpp_mad *rmpp_mad;
1769
1770 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1771 return !mad_agent_priv->agent.rmpp_version ||
Ira Weiny1471cb62014-08-08 19:00:56 -04001772 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001773 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1774 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1775 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1776}
1777
Ira Weiny8bf4b302015-05-08 14:27:23 -04001778static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1779 const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001780{
Ira Weiny8bf4b302015-05-08 14:27:23 -04001781 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001782 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1783}
1784
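/*
 * Decide whether a received MAD plausibly pairs with the given send:
 * compare the send's address handle with the receive work completion,
 * using LID/path bits when there is no GRH and the GIDs when there is
 * one. Returns 0 when in doubt, to avoid false positives.
 */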
Ira Weinyf766c582015-05-08 14:27:24 -04001785static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1786 const struct ib_mad_send_wr_private *wr,
1787 const struct ib_mad_recv_wc *rwc )
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001788{
1789 struct ib_ah_attr attr;
1790 u8 send_resp, rcv_resp;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001791 union ib_gid sgid;
1792 struct ib_device *device = mad_agent_priv->agent.device;
1793 u8 port_num = mad_agent_priv->agent.port_num;
1794 u8 lmc;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001795
Ira Weiny96909302015-05-08 14:27:22 -04001796 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1797 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001798
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001799 if (send_resp == rcv_resp)
1800 /* both requests, or both responses. GIDs different */
1801 return 0;
1802
1803 if (ib_query_ah(wr->send_buf.ah, &attr))
1804 /* Assume not equal, to avoid false positives. */
1805 return 0;
1806
Jack Morgenstein9874e742006-06-17 20:37:34 -07001807 if (!!(attr.ah_flags & IB_AH_GRH) !=
1808 !!(rwc->wc->wc_flags & IB_WC_GRH))
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001809 /* one has GID, other does not. Assume different */
1810 return 0;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001811
1812 if (!send_resp && rcv_resp) {
1813 /* is request/response. */
1814 if (!(attr.ah_flags & IB_AH_GRH)) {
1815 if (ib_get_cached_lmc(device, port_num, &lmc))
1816 return 0;
1817 return (!lmc || !((attr.src_path_bits ^
1818 rwc->wc->dlid_path_bits) &
1819 ((1 << lmc) - 1)));
1820 } else {
1821 if (ib_get_cached_gid(device, port_num,
1822 attr.grh.sgid_index, &sgid))
1823 return 0;
1824 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1825 16);
1826 }
1827 }
1828
1829 if (!(attr.ah_flags & IB_AH_GRH))
1830 return attr.dlid == rwc->wc->slid;
1831 else
1832 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1833 16);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001834}
Jack Morgenstein9874e742006-06-17 20:37:34 -07001835
1836static inline int is_direct(u8 class)
1837{
1838 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1839}
1840
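/*
 * Find the outstanding send that a received MAD corresponds to, first
 * on the wait list and then on the send list (a response can arrive
 * before the send completion has been processed). Only a request whose
 * status is still IB_WC_SUCCESS is returned.
 */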
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001841struct ib_mad_send_wr_private*
Ira Weinyf766c582015-05-08 14:27:24 -04001842ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1843 const struct ib_mad_recv_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844{
Jack Morgenstein9874e742006-06-17 20:37:34 -07001845 struct ib_mad_send_wr_private *wr;
Ira Weiny83a1d222015-06-06 14:38:23 -04001846 const struct ib_mad_hdr *mad_hdr;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001847
Ira Weiny83a1d222015-06-06 14:38:23 -04001848 mad_hdr = &wc->recv_buf.mad->mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
Jack Morgenstein9874e742006-06-17 20:37:34 -07001850 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
Ira Weiny83a1d222015-06-06 14:38:23 -04001851 if ((wr->tid == mad_hdr->tid) &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001852 rcv_has_same_class(wr, wc) &&
1853 /*
1854 * Don't check GID for direct routed MADs.
1855 * These might have permissive LIDs.
1856 */
Ira Weiny83a1d222015-06-06 14:38:23 -04001857 (is_direct(mad_hdr->mgmt_class) ||
Jack Morgenstein9874e742006-06-17 20:37:34 -07001858 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Roland Dreier39798692006-11-13 09:38:07 -08001859 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 }
1861
1862 /*
1863 * It's possible to receive the response before we've
1864 * been notified that the send has completed
1865 */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001866 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04001867 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
Ira Weiny83a1d222015-06-06 14:38:23 -04001868 wr->tid == mad_hdr->tid &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001869 wr->timeout &&
1870 rcv_has_same_class(wr, wc) &&
1871 /*
1872 * Don't check GID for direct routed MADs.
1873 * These might have permissive LIDs.
1874 */
Ira Weiny83a1d222015-06-06 14:38:23 -04001875 (is_direct(mad_hdr->mgmt_class) ||
Jack Morgenstein9874e742006-06-17 20:37:34 -07001876 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 /* Verify request has not been canceled */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001878 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 }
1880 return NULL;
1881}
1882
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001883void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001884{
1885 mad_send_wr->timeout = 0;
Akinobu Mita179e0912006-06-26 00:24:41 -07001886 if (mad_send_wr->refcount == 1)
1887 list_move_tail(&mad_send_wr->agent_list,
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001888 &mad_send_wr->mad_agent_priv->done_list);
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001889}
1890
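/*
 * Deliver a received MAD to its agent: reassemble RMPP when the kernel
 * handles RMPP for this agent, then invoke the agent's recv_handler;
 * for responses, the matching send request is completed afterwards.
 */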
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001892 struct ib_mad_recv_wc *mad_recv_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893{
1894 struct ib_mad_send_wr_private *mad_send_wr;
1895 struct ib_mad_send_wc mad_send_wc;
1896 unsigned long flags;
1897
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001898 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1899 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
Ira Weiny1471cb62014-08-08 19:00:56 -04001900 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001901 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1902 mad_recv_wc);
1903 if (!mad_recv_wc) {
Sean Hefty1b52fa982006-05-12 14:57:52 -07001904 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001905 return;
1906 }
1907 }
1908
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 /* Complete corresponding request */
Ira Weiny96909302015-05-08 14:27:22 -04001910 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001912 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 if (!mad_send_wr) {
1914 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04001915 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1916 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1917 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1918 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1919 /* user rmpp is in effect
1920 * and this is an active RMPP MAD
1921 */
1922 mad_recv_wc->wc->wr_id = 0;
1923 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1924 mad_recv_wc);
1925 atomic_dec(&mad_agent_priv->refcount);
1926 } else {
1927 /* not user rmpp, revert to normal behavior and
1928 * drop the mad */
1929 ib_free_recv_mad(mad_recv_wc);
1930 deref_mad_agent(mad_agent_priv);
1931 return;
1932 }
1933 } else {
1934 ib_mark_mad_done(mad_send_wr);
1935 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1936
1937 /* Defined behavior is to complete response before request */
1938 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1939 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1940 mad_recv_wc);
1941 atomic_dec(&mad_agent_priv->refcount);
1942
1943 mad_send_wc.status = IB_WC_SUCCESS;
1944 mad_send_wc.vendor_err = 0;
1945 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1946 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 } else {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001949 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1950 mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -07001951 deref_mad_agent(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 }
1953}
1954
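/*
 * Process a directed route SMP received on QP0: validate and possibly
 * consume it locally, or, on a switch, forward it out the egress port
 * indicated by the SMP. Returns IB_SMI_DISCARD when the caller should
 * drop the MAD.
 */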
Ira Weinye11ae8a2015-06-06 14:38:24 -04001955static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
1956 const struct ib_mad_qp_info *qp_info,
1957 const struct ib_wc *wc,
1958 int port_num,
1959 struct ib_mad_private *recv,
1960 struct ib_mad_private *response)
1961{
1962 enum smi_forward_action retsmi;
Ira Weinyc9082e52015-06-06 14:38:30 -04001963 struct ib_smp *smp = (struct ib_smp *)recv->mad;
Ira Weinye11ae8a2015-06-06 14:38:24 -04001964
Ira Weinyc9082e52015-06-06 14:38:30 -04001965 if (smi_handle_dr_smp_recv(smp,
Ira Weinye11ae8a2015-06-06 14:38:24 -04001966 port_priv->device->node_type,
1967 port_num,
1968 port_priv->device->phys_port_cnt) ==
1969 IB_SMI_DISCARD)
1970 return IB_SMI_DISCARD;
1971
Ira Weinyc9082e52015-06-06 14:38:30 -04001972 retsmi = smi_check_forward_dr_smp(smp);
Ira Weinye11ae8a2015-06-06 14:38:24 -04001973 if (retsmi == IB_SMI_LOCAL)
1974 return IB_SMI_HANDLE;
1975
1976 if (retsmi == IB_SMI_SEND) { /* don't forward */
Ira Weinyc9082e52015-06-06 14:38:30 -04001977 if (smi_handle_dr_smp_send(smp,
Ira Weinye11ae8a2015-06-06 14:38:24 -04001978 port_priv->device->node_type,
1979 port_num) == IB_SMI_DISCARD)
1980 return IB_SMI_DISCARD;
1981
Ira Weinyc9082e52015-06-06 14:38:30 -04001982 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
Ira Weinye11ae8a2015-06-06 14:38:24 -04001983 return IB_SMI_DISCARD;
1984 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1985 /* forward case for switches */
Ira Weinyc9082e52015-06-06 14:38:30 -04001986 memcpy(response, recv, mad_priv_size(response));
Ira Weinye11ae8a2015-06-06 14:38:24 -04001987 response->header.recv_wc.wc = &response->header.wc;
Ira Weinyc9082e52015-06-06 14:38:30 -04001988 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
Ira Weinye11ae8a2015-06-06 14:38:24 -04001989 response->header.recv_wc.recv_buf.grh = &response->grh;
1990
Ira Weinyc9082e52015-06-06 14:38:30 -04001991 agent_send_response((const struct ib_mad_hdr *)response->mad,
Ira Weinye11ae8a2015-06-06 14:38:24 -04001992 &response->grh, wc,
1993 port_priv->device,
Ira Weinyc9082e52015-06-06 14:38:30 -04001994 smi_get_fwd_port(smp),
1995 qp_info->qp->qp_num,
1996 response->mad_size);
Ira Weinye11ae8a2015-06-06 14:38:24 -04001997
1998 return IB_SMI_DISCARD;
1999 }
2000 return IB_SMI_HANDLE;
2001}
2002
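/*
 * Build a reply with an "unsupported method/attribute" status for a
 * Get/Set request that no agent or driver claimed, so the sender gets
 * an error status instead of a silent timeout.
 */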
Ira Weinyc9082e52015-06-06 14:38:30 -04002003static bool generate_unmatched_resp(const struct ib_mad_private *recv,
Swapna Thete0b307042012-02-25 17:47:32 -08002004 struct ib_mad_private *response)
2005{
Ira Weinyc9082e52015-06-06 14:38:30 -04002006 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2007 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2008
2009 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2010 recv_hdr->method == IB_MGMT_METHOD_SET) {
2011 memcpy(response, recv, mad_priv_size(response));
Swapna Thete0b307042012-02-25 17:47:32 -08002012 response->header.recv_wc.wc = &response->header.wc;
Ira Weinyc9082e52015-06-06 14:38:30 -04002013 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
Swapna Thete0b307042012-02-25 17:47:32 -08002014 response->header.recv_wc.recv_buf.grh = &response->grh;
Ira Weinyc9082e52015-06-06 14:38:30 -04002015 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2016 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2017 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2018 resp_hdr->status |= IB_SMP_DIRECTION;
Swapna Thete0b307042012-02-25 17:47:32 -08002019
2020 return true;
2021 } else {
2022 return false;
2023 }
2024}
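
/*
 * Receive completion handler: unmap the buffer, validate the MAD, give
 * directed route SMP handling and the driver's process_mad hook a
 * chance to consume it, then route it to the matching agent (or send
 * an unmatched response), and repost a receive buffer on the QP.
 */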
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2026 struct ib_wc *wc)
2027{
2028 struct ib_mad_qp_info *qp_info;
2029 struct ib_mad_private_header *mad_priv_hdr;
Hal Rosenstock445d6802007-08-03 10:45:17 -07002030 struct ib_mad_private *recv, *response = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 struct ib_mad_list_head *mad_list;
2032 struct ib_mad_agent_private *mad_agent;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002033 int port_num;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002034 int ret = IB_MAD_RESULT_SUCCESS;
Ira Weiny4cd7c942015-06-06 14:38:31 -04002035 size_t mad_size;
2036 u16 resp_mad_pkey_index = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2039 qp_info = mad_list->mad_queue->qp_info;
2040 dequeue_mad(mad_list);
2041
2042 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2043 mad_list);
2044 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
Ralph Campbell15271062006-12-12 14:28:30 -08002045 ib_dma_unmap_single(port_priv->device,
2046 recv->header.mapping,
Ira Weinyc9082e52015-06-06 14:38:30 -04002047 mad_priv_dma_size(recv),
Ralph Campbell15271062006-12-12 14:28:30 -08002048 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
2050 /* Setup MAD receive work completion from "normal" work completion */
Sean Hefty24239af2005-04-16 15:26:08 -07002051 recv->header.wc = *wc;
2052 recv->header.recv_wc.wc = &recv->header.wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
Ira Weinyc9082e52015-06-06 14:38:30 -04002054 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2056
2057 if (atomic_read(&qp_info->snoop_count))
2058 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2059
2060 /* Validate MAD */
Ira Weinyc9082e52015-06-06 14:38:30 -04002061 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info->qp->qp_num))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 goto out;
2063
Ira Weiny4cd7c942015-06-06 14:38:31 -04002064 mad_size = recv->mad_size;
2065 response = alloc_mad_private(mad_size, GFP_KERNEL);
Hal Rosenstock445d6802007-08-03 10:45:17 -07002066 if (!response) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002067 dev_err(&port_priv->device->dev,
2068 "ib_mad_recv_done_handler no memory for response buffer\n");
Hal Rosenstock445d6802007-08-03 10:45:17 -07002069 goto out;
2070 }
2071
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002072 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
2073 port_num = wc->port_num;
2074 else
2075 port_num = port_priv->port_num;
2076
Ira Weinyc9082e52015-06-06 14:38:30 -04002077 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
Ira Weinye11ae8a2015-06-06 14:38:24 -04002079 if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
2080 response)
2081 == IB_SMI_DISCARD)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 }
2084
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 /* Give driver "right of first refusal" on incoming MAD */
2086 if (port_priv->device->process_mad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 ret = port_priv->device->process_mad(port_priv->device, 0,
2088 port_priv->port_num,
2089 wc, &recv->grh,
Ira Weiny4cd7c942015-06-06 14:38:31 -04002090 (const struct ib_mad_hdr *)recv->mad,
2091 recv->mad_size,
2092 (struct ib_mad_hdr *)response->mad,
2093 &mad_size, &resp_mad_pkey_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 if (ret & IB_MAD_RESULT_SUCCESS) {
2095 if (ret & IB_MAD_RESULT_CONSUMED)
2096 goto out;
2097 if (ret & IB_MAD_RESULT_REPLY) {
Ira Weinyc9082e52015-06-06 14:38:30 -04002098 agent_send_response((const struct ib_mad_hdr *)response->mad,
Sean Hefty34816ad2005-10-25 10:51:39 -07002099 &recv->grh, wc,
2100 port_priv->device,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002101 port_num,
Ira Weinyc9082e52015-06-06 14:38:30 -04002102 qp_info->qp->qp_num,
2103 response->mad_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 goto out;
2105 }
2106 }
2107 }
2108
Ira Weinyc9082e52015-06-06 14:38:30 -04002109 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 if (mad_agent) {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002111 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 /*
2113 * recv is freed up in error cases in ib_mad_complete_recv
2114 * or via recv_handler in ib_mad_complete_recv()
2115 */
2116 recv = NULL;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002117 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2118 generate_unmatched_resp(recv, response)) {
Ira Weinyc9082e52015-06-06 14:38:30 -04002119 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2120 port_priv->device, port_num,
2121 qp_info->qp->qp_num, response->mad_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 }
2123
2124out:
2125 /* Post another receive request for this QP */
2126 if (response) {
2127 ib_mad_post_receive_mads(qp_info, response);
Ira Weinyc9082e52015-06-06 14:38:30 -04002128 kfree(recv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 } else
2130 ib_mad_post_receive_mads(qp_info, recv);
2131}
2132
2133static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2134{
2135 struct ib_mad_send_wr_private *mad_send_wr;
2136 unsigned long delay;
2137
2138 if (list_empty(&mad_agent_priv->wait_list)) {
Tejun Heo136b5722012-08-21 13:18:24 -07002139 cancel_delayed_work(&mad_agent_priv->timed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 } else {
2141 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2142 struct ib_mad_send_wr_private,
2143 agent_list);
2144
2145 if (time_after(mad_agent_priv->timeout,
2146 mad_send_wr->timeout)) {
2147 mad_agent_priv->timeout = mad_send_wr->timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 delay = mad_send_wr->timeout - jiffies;
2149 if ((long)delay <= 0)
2150 delay = 1;
Tejun Heoe7c2f962012-08-21 13:18:24 -07002151 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2152 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 }
2154 }
2155}
2156
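/*
 * Move a send that is waiting for a response onto the wait list,
 * keeping the list sorted by absolute timeout, and reschedule the
 * timeout work if this entry now expires first.
 */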
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002157static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158{
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002159 struct ib_mad_agent_private *mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 struct ib_mad_send_wr_private *temp_mad_send_wr;
2161 struct list_head *list_item;
2162 unsigned long delay;
2163
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002164 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 list_del(&mad_send_wr->agent_list);
2166
2167 delay = mad_send_wr->timeout;
2168 mad_send_wr->timeout += jiffies;
2169
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002170 if (delay) {
2171 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2172 temp_mad_send_wr = list_entry(list_item,
2173 struct ib_mad_send_wr_private,
2174 agent_list);
2175 if (time_after(mad_send_wr->timeout,
2176 temp_mad_send_wr->timeout))
2177 break;
2178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 }
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002180 else
2181 list_item = &mad_agent_priv->wait_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 list_add(&mad_send_wr->agent_list, list_item);
2183
2184 /* Reschedule a work item if we have a shorter timeout */
Tejun Heoe7c2f962012-08-21 13:18:24 -07002185 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2186 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2187 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188}
2189
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002190void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2191 int timeout_ms)
2192{
2193 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2194 wait_for_response(mad_send_wr);
2195}
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197/*
2198 * Process a send work completion
2199 */
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002200void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2201 struct ib_mad_send_wc *mad_send_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202{
2203 struct ib_mad_agent_private *mad_agent_priv;
2204 unsigned long flags;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002205 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002207 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04002209 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002210 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2211 if (ret == IB_RMPP_RESULT_CONSUMED)
2212 goto done;
2213 } else
2214 ret = IB_RMPP_RESULT_UNHANDLED;
2215
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 if (mad_send_wc->status != IB_WC_SUCCESS &&
2217 mad_send_wr->status == IB_WC_SUCCESS) {
2218 mad_send_wr->status = mad_send_wc->status;
2219 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2220 }
2221
2222 if (--mad_send_wr->refcount > 0) {
2223 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2224 mad_send_wr->status == IB_WC_SUCCESS) {
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002225 wait_for_response(mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002227 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 }
2229
2230 /* Remove send from MAD agent and notify client of completion */
2231 list_del(&mad_send_wr->agent_list);
2232 adjust_timeout(mad_agent_priv);
2233 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2234
2235 if (mad_send_wr->status != IB_WC_SUCCESS )
2236 mad_send_wc->status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002237 if (ret == IB_RMPP_RESULT_INTERNAL)
2238 ib_rmpp_send_handler(mad_send_wc);
2239 else
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002240 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2241 mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
2243 /* Release reference on agent taken when sending */
Sean Hefty1b52fa982006-05-12 14:57:52 -07002244 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002245 return;
2246done:
2247 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248}
2249
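/*
 * Send completion handler: unmap the send buffers, promote a queued
 * overflow send onto the hardware send queue, complete the work
 * request, and post the promoted send if there was one.
 */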
2250static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2251 struct ib_wc *wc)
2252{
2253 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2254 struct ib_mad_list_head *mad_list;
2255 struct ib_mad_qp_info *qp_info;
2256 struct ib_mad_queue *send_queue;
2257 struct ib_send_wr *bad_send_wr;
Sean Hefty34816ad2005-10-25 10:51:39 -07002258 struct ib_mad_send_wc mad_send_wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 unsigned long flags;
2260 int ret;
2261
2262 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2263 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2264 mad_list);
2265 send_queue = mad_list->mad_queue;
2266 qp_info = send_queue->qp_info;
2267
2268retry:
Ralph Campbell15271062006-12-12 14:28:30 -08002269 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2270 mad_send_wr->header_mapping,
2271 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2272 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2273 mad_send_wr->payload_mapping,
2274 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 queued_send_wr = NULL;
2276 spin_lock_irqsave(&send_queue->lock, flags);
2277 list_del(&mad_list->list);
2278
2279 /* Move queued send to the send queue */
2280 if (send_queue->count-- > send_queue->max_active) {
2281 mad_list = container_of(qp_info->overflow_list.next,
2282 struct ib_mad_list_head, list);
2283 queued_send_wr = container_of(mad_list,
2284 struct ib_mad_send_wr_private,
2285 mad_list);
Akinobu Mita179e0912006-06-26 00:24:41 -07002286 list_move_tail(&mad_list->list, &send_queue->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 }
2288 spin_unlock_irqrestore(&send_queue->lock, flags);
2289
Sean Hefty34816ad2005-10-25 10:51:39 -07002290 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2291 mad_send_wc.status = wc->status;
2292 mad_send_wc.vendor_err = wc->vendor_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 if (atomic_read(&qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002294 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 IB_MAD_SNOOP_SEND_COMPLETIONS);
Sean Hefty34816ad2005-10-25 10:51:39 -07002296 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
2298 if (queued_send_wr) {
2299 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
Sean Hefty34816ad2005-10-25 10:51:39 -07002300 &bad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002302 dev_err(&port_priv->device->dev,
2303 "ib_post_send failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 mad_send_wr = queued_send_wr;
2305 wc->status = IB_WC_LOC_QP_OP_ERR;
2306 goto retry;
2307 }
2308 }
2309}
2310
2311static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2312{
2313 struct ib_mad_send_wr_private *mad_send_wr;
2314 struct ib_mad_list_head *mad_list;
2315 unsigned long flags;
2316
2317 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2318 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2319 mad_send_wr = container_of(mad_list,
2320 struct ib_mad_send_wr_private,
2321 mad_list);
2322 mad_send_wr->retry = 1;
2323 }
2324 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2325}
2326
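/*
 * Handle a completion with error status: receive errors are left to
 * QP teardown, flushed sends are reposted when possible, and other
 * send errors move the QP from SQE back to RTS, fail the offending
 * send and mark the remaining sends for retry.
 */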
2327static void mad_error_handler(struct ib_mad_port_private *port_priv,
2328 struct ib_wc *wc)
2329{
2330 struct ib_mad_list_head *mad_list;
2331 struct ib_mad_qp_info *qp_info;
2332 struct ib_mad_send_wr_private *mad_send_wr;
2333 int ret;
2334
2335 /* Determine if failure was a send or receive */
2336 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2337 qp_info = mad_list->mad_queue->qp_info;
2338 if (mad_list->mad_queue == &qp_info->recv_queue)
2339 /*
2340 * Receive errors indicate that the QP has entered the error
2341		 * state - error handling/shutdown code will clean up
2342 */
2343 return;
2344
2345 /*
2346 * Send errors will transition the QP to SQE - move
2347 * QP to RTS and repost flushed work requests
2348 */
2349 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2350 mad_list);
2351 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2352 if (mad_send_wr->retry) {
2353 /* Repost send */
2354 struct ib_send_wr *bad_send_wr;
2355
2356 mad_send_wr->retry = 0;
2357 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2358 &bad_send_wr);
2359 if (ret)
2360 ib_mad_send_done_handler(port_priv, wc);
2361 } else
2362 ib_mad_send_done_handler(port_priv, wc);
2363 } else {
2364 struct ib_qp_attr *attr;
2365
2366 /* Transition QP to RTS and fail offending send */
2367 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2368 if (attr) {
2369 attr->qp_state = IB_QPS_RTS;
2370 attr->cur_qp_state = IB_QPS_SQE;
2371 ret = ib_modify_qp(qp_info->qp, attr,
2372 IB_QP_STATE | IB_QP_CUR_STATE);
2373 kfree(attr);
2374 if (ret)
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002375 dev_err(&port_priv->device->dev,
2376 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2377 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 else
2379 mark_sends_for_retry(qp_info);
2380 }
2381 ib_mad_send_done_handler(port_priv, wc);
2382 }
2383}
2384
2385/*
2386 * IB MAD completion callback
2387 */
David Howellsc4028952006-11-22 14:57:56 +00002388static void ib_mad_completion_handler(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389{
2390 struct ib_mad_port_private *port_priv;
2391 struct ib_wc wc;
2392
David Howellsc4028952006-11-22 14:57:56 +00002393 port_priv = container_of(work, struct ib_mad_port_private, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2395
2396 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2397 if (wc.status == IB_WC_SUCCESS) {
2398 switch (wc.opcode) {
2399 case IB_WC_SEND:
2400 ib_mad_send_done_handler(port_priv, &wc);
2401 break;
2402 case IB_WC_RECV:
2403 ib_mad_recv_done_handler(port_priv, &wc);
2404 break;
2405 default:
2406 BUG_ON(1);
2407 break;
2408 }
2409 } else
2410 mad_error_handler(port_priv, &wc);
2411 }
2412}
2413
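/*
 * Flush every outstanding send for an agent, typically when the agent
 * is being unregistered, and report each one to the client with
 * IB_WC_WR_FLUSH_ERR.
 */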
2414static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2415{
2416 unsigned long flags;
2417 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2418 struct ib_mad_send_wc mad_send_wc;
2419 struct list_head cancel_list;
2420
2421 INIT_LIST_HEAD(&cancel_list);
2422
2423 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2424 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2425 &mad_agent_priv->send_list, agent_list) {
2426 if (mad_send_wr->status == IB_WC_SUCCESS) {
Roland Dreier3cd96562006-09-22 15:22:46 -07002427 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2429 }
2430 }
2431
2432 /* Empty wait list to prevent receives from finding a request */
2433 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2434 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2435
2436 /* Report all cancelled requests */
2437 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2438 mad_send_wc.vendor_err = 0;
2439
2440 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2441 &cancel_list, agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002442 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2443 list_del(&mad_send_wr->agent_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2445 &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 atomic_dec(&mad_agent_priv->refcount);
2447 }
2448}
2449
2450static struct ib_mad_send_wr_private*
Sean Hefty34816ad2005-10-25 10:51:39 -07002451find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2452 struct ib_mad_send_buf *send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453{
2454 struct ib_mad_send_wr_private *mad_send_wr;
2455
2456 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2457 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002458 if (&mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 return mad_send_wr;
2460 }
2461
2462 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2463 agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04002464 if (is_rmpp_data_mad(mad_agent_priv,
2465 mad_send_wr->send_buf.mad) &&
Sean Hefty34816ad2005-10-25 10:51:39 -07002466 &mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 return mad_send_wr;
2468 }
2469 return NULL;
2470}
2471
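/*
 * Change the timeout of an outstanding send; a timeout of zero cancels
 * it by flushing it through the normal completion path.
 */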
Sean Hefty34816ad2005-10-25 10:51:39 -07002472int ib_modify_mad(struct ib_mad_agent *mad_agent,
2473 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474{
2475 struct ib_mad_agent_private *mad_agent_priv;
2476 struct ib_mad_send_wr_private *mad_send_wr;
2477 unsigned long flags;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002478 int active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479
2480 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2481 agent);
2482 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Sean Hefty34816ad2005-10-25 10:51:39 -07002483 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002484 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002486 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 }
2488
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002489 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002490 if (!timeout_ms) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002492 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 }
2494
Sean Hefty34816ad2005-10-25 10:51:39 -07002495 mad_send_wr->send_buf.timeout_ms = timeout_ms;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002496 if (active)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002497 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2498 else
2499 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002501 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2502 return 0;
2503}
2504EXPORT_SYMBOL(ib_modify_mad);
2505
Sean Hefty34816ad2005-10-25 10:51:39 -07002506void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2507 struct ib_mad_send_buf *send_buf)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002508{
Sean Hefty34816ad2005-10-25 10:51:39 -07002509 ib_modify_mad(mad_agent, send_buf, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510}
2511EXPORT_SYMBOL(ib_cancel_mad);
2512
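/*
 * Work handler for MADs completed locally (handled on this node without
 * going onto the wire): deliver the receive side, if any, to the
 * receiving agent and then report the send completion to the sender.
 */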
David Howellsc4028952006-11-22 14:57:56 +00002513static void local_completions(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514{
2515 struct ib_mad_agent_private *mad_agent_priv;
2516 struct ib_mad_local_private *local;
2517 struct ib_mad_agent_private *recv_mad_agent;
2518 unsigned long flags;
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002519 int free_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 struct ib_wc wc;
2521 struct ib_mad_send_wc mad_send_wc;
2522
David Howellsc4028952006-11-22 14:57:56 +00002523 mad_agent_priv =
2524 container_of(work, struct ib_mad_agent_private, local_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
2526 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2527 while (!list_empty(&mad_agent_priv->local_list)) {
2528 local = list_entry(mad_agent_priv->local_list.next,
2529 struct ib_mad_local_private,
2530 completion_list);
Michael S. Tsirkin37289ef2006-03-30 15:52:54 +02002531 list_del(&local->completion_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002533 free_mad = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 if (local->mad_priv) {
2535 recv_mad_agent = local->recv_mad_agent;
2536 if (!recv_mad_agent) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002537 dev_err(&mad_agent_priv->agent.device->dev,
2538 "No receive MAD agent for local completion\n");
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002539 free_mad = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 goto local_send_completion;
2541 }
2542
2543 /*
2544 * Defined behavior is to complete response
2545 * before request
2546 */
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +02002547 build_smp_wc(recv_mad_agent->agent.qp,
2548 (unsigned long) local->mad_send_wr,
Sean Hefty97f52eb2005-08-13 21:05:57 -07002549 be16_to_cpu(IB_LID_PERMISSIVE),
Sean Hefty34816ad2005-10-25 10:51:39 -07002550 0, recv_mad_agent->agent.port_num, &wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
2552 local->mad_priv->header.recv_wc.wc = &wc;
2553 local->mad_priv->header.recv_wc.mad_len =
2554 sizeof(struct ib_mad);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002555 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2556 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2557 &local->mad_priv->header.recv_wc.rmpp_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2559 local->mad_priv->header.recv_wc.recv_buf.mad =
Ira Weinyc9082e52015-06-06 14:38:30 -04002560 (struct ib_mad *)local->mad_priv->mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2562 snoop_recv(recv_mad_agent->qp_info,
2563 &local->mad_priv->header.recv_wc,
2564 IB_MAD_SNOOP_RECVS);
2565 recv_mad_agent->agent.recv_handler(
2566 &recv_mad_agent->agent,
2567 &local->mad_priv->header.recv_wc);
2568 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2569 atomic_dec(&recv_mad_agent->refcount);
2570 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2571 }
2572
2573local_send_completion:
2574 /* Complete send */
2575 mad_send_wc.status = IB_WC_SUCCESS;
2576 mad_send_wc.vendor_err = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07002577 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002579 snoop_send(mad_agent_priv->qp_info,
2580 &local->mad_send_wr->send_buf,
2581 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2583 &mad_send_wc);
2584
2585 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 atomic_dec(&mad_agent_priv->refcount);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002587 if (free_mad)
Ira Weinyc9082e52015-06-06 14:38:30 -04002588 kfree(local->mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 kfree(local);
2590 }
2591 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2592}
2593
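/*
 * Retransmit a timed-out send if it has retries left, letting RMPP
 * decide how to resume when the agent uses kernel RMPP.
 */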
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002594static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2595{
2596 int ret;
2597
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002598 if (!mad_send_wr->retries_left)
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002599 return -ETIMEDOUT;
2600
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002601 mad_send_wr->retries_left--;
2602 mad_send_wr->send_buf.retries++;
2603
Sean Hefty34816ad2005-10-25 10:51:39 -07002604 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002605
Ira Weiny1471cb62014-08-08 19:00:56 -04002606 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002607 ret = ib_retry_rmpp(mad_send_wr);
2608 switch (ret) {
2609 case IB_RMPP_RESULT_UNHANDLED:
2610 ret = ib_send_mad(mad_send_wr);
2611 break;
2612 case IB_RMPP_RESULT_CONSUMED:
2613 ret = 0;
2614 break;
2615 default:
2616 ret = -ECOMM;
2617 break;
2618 }
2619 } else
2620 ret = ib_send_mad(mad_send_wr);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002621
2622 if (!ret) {
2623 mad_send_wr->refcount++;
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002624 list_add_tail(&mad_send_wr->agent_list,
2625 &mad_send_wr->mad_agent_priv->send_list);
2626 }
2627 return ret;
2628}
2629
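/*
 * Delayed work handler that expires wait-list entries whose timeout has
 * passed: sends that can still be retried are resent, the rest are
 * reported to the client with IB_WC_RESP_TIMEOUT_ERR (or their recorded
 * status).
 */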
David Howellsc4028952006-11-22 14:57:56 +00002630static void timeout_sends(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631{
2632 struct ib_mad_agent_private *mad_agent_priv;
2633 struct ib_mad_send_wr_private *mad_send_wr;
2634 struct ib_mad_send_wc mad_send_wc;
2635 unsigned long flags, delay;
2636
David Howellsc4028952006-11-22 14:57:56 +00002637 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2638 timed_work.work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 mad_send_wc.vendor_err = 0;
2640
2641 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2642 while (!list_empty(&mad_agent_priv->wait_list)) {
2643 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2644 struct ib_mad_send_wr_private,
2645 agent_list);
2646
2647 if (time_after(mad_send_wr->timeout, jiffies)) {
2648 delay = mad_send_wr->timeout - jiffies;
2649 if ((long)delay <= 0)
2650 delay = 1;
2651 queue_delayed_work(mad_agent_priv->qp_info->
2652 port_priv->wq,
2653 &mad_agent_priv->timed_work, delay);
2654 break;
2655 }
2656
Hal Rosenstockdbf92272005-07-27 11:45:30 -07002657 list_del(&mad_send_wr->agent_list);
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002658 if (mad_send_wr->status == IB_WC_SUCCESS &&
2659 !retry_send(mad_send_wr))
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002660 continue;
2661
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2663
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002664 if (mad_send_wr->status == IB_WC_SUCCESS)
2665 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2666 else
2667 mad_send_wc.status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002668 mad_send_wc.send_buf = &mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2670 &mad_send_wc);
2671
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 atomic_dec(&mad_agent_priv->refcount);
2673 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2674 }
2675 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2676}
2677
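/*
 * CQ completion callback, called from the low-level driver's completion
 * path.  It only kicks the port's single-threaded workqueue; the actual
 * completions are drained later by ib_mad_completion_handler() from
 * process context.
 */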
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07002678static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679{
2680 struct ib_mad_port_private *port_priv = cq->cq_context;
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002681 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682
Michael S. Tsirkindc059802006-03-20 10:08:25 -08002683 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2684 if (!list_empty(&port_priv->port_list))
2685 queue_work(port_priv->wq, &port_priv->work);
2686 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687}
2688
2689/*
2690 * Allocate receive MADs and post receive WRs for them
2691 */
2692static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2693 struct ib_mad_private *mad)
2694{
2695 unsigned long flags;
2696 int post, ret;
2697 struct ib_mad_private *mad_priv;
2698 struct ib_sge sg_list;
2699 struct ib_recv_wr recv_wr, *bad_recv_wr;
2700 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2701
2702 /* Initialize common scatter list fields */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2704
2705 /* Initialize common receive WR fields */
2706 recv_wr.next = NULL;
2707 recv_wr.sg_list = &sg_list;
2708 recv_wr.num_sge = 1;
2709
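	/*
	 * Keep allocating, mapping, and posting buffers until the receive
	 * queue holds max_active work requests.  A caller-supplied buffer
	 * (mad != NULL) is reposted before any new allocation is made.
	 */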
2710 do {
2711 /* Allocate and map receive buffer */
2712 if (mad) {
2713 mad_priv = mad;
2714 mad = NULL;
2715 } else {
Ira Weinyc9082e52015-06-06 14:38:30 -04002716 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2717 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 if (!mad_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002719 dev_err(&qp_info->port_priv->device->dev,
2720 "No memory for receive buffer\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 ret = -ENOMEM;
2722 break;
2723 }
2724 }
Ira Weinyc9082e52015-06-06 14:38:30 -04002725 sg_list.length = mad_priv_dma_size(mad_priv);
Ralph Campbell15271062006-12-12 14:28:30 -08002726 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2727 &mad_priv->grh,
Ira Weinyc9082e52015-06-06 14:38:30 -04002728 mad_priv_dma_size(mad_priv),
Ralph Campbell15271062006-12-12 14:28:30 -08002729 DMA_FROM_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02002730 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2731 sg_list.addr))) {
2732 ret = -ENOMEM;
2733 break;
2734 }
Ralph Campbell15271062006-12-12 14:28:30 -08002735 mad_priv->header.mapping = sg_list.addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2737 mad_priv->header.mad_list.mad_queue = recv_queue;
2738
2739 /* Post receive WR */
2740 spin_lock_irqsave(&recv_queue->lock, flags);
2741 post = (++recv_queue->count < recv_queue->max_active);
2742 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2743 spin_unlock_irqrestore(&recv_queue->lock, flags);
2744 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2745 if (ret) {
2746 spin_lock_irqsave(&recv_queue->lock, flags);
2747 list_del(&mad_priv->header.mad_list.list);
2748 recv_queue->count--;
2749 spin_unlock_irqrestore(&recv_queue->lock, flags);
Ralph Campbell15271062006-12-12 14:28:30 -08002750 ib_dma_unmap_single(qp_info->port_priv->device,
2751 mad_priv->header.mapping,
Ira Weinyc9082e52015-06-06 14:38:30 -04002752 mad_priv_dma_size(mad_priv),
Ralph Campbell15271062006-12-12 14:28:30 -08002753 DMA_FROM_DEVICE);
Ira Weinyc9082e52015-06-06 14:38:30 -04002754 kfree(mad_priv);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002755 dev_err(&qp_info->port_priv->device->dev,
2756 "ib_post_recv failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 break;
2758 }
2759 } while (post);
2760
2761 return ret;
2762}
2763
2764/*
2765 * Return all the posted receive MADs
2766 */
2767static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2768{
2769 struct ib_mad_private_header *mad_priv_hdr;
2770 struct ib_mad_private *recv;
2771 struct ib_mad_list_head *mad_list;
2772
Eli Cohenfac70d52010-09-27 17:51:11 -07002773 if (!qp_info->qp)
2774 return;
2775
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 while (!list_empty(&qp_info->recv_queue.list)) {
2777
2778 mad_list = list_entry(qp_info->recv_queue.list.next,
2779 struct ib_mad_list_head, list);
2780 mad_priv_hdr = container_of(mad_list,
2781 struct ib_mad_private_header,
2782 mad_list);
2783 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2784 header);
2785
2786 /* Remove from posted receive MAD list */
2787 list_del(&mad_list->list);
2788
Ralph Campbell15271062006-12-12 14:28:30 -08002789 ib_dma_unmap_single(qp_info->port_priv->device,
2790 recv->header.mapping,
Ira Weinyc9082e52015-06-06 14:38:30 -04002791 mad_priv_dma_size(recv),
Ralph Campbell15271062006-12-12 14:28:30 -08002792 DMA_FROM_DEVICE);
Ira Weinyc9082e52015-06-06 14:38:30 -04002793 kfree(recv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 }
2795
2796 qp_info->recv_queue.count = 0;
2797}
2798
2799/*
2800 * Start the port
2801 */
2802static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2803{
2804 int ret, i;
2805 struct ib_qp_attr *attr;
2806 struct ib_qp *qp;
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002807 u16 pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808
2809 attr = kmalloc(sizeof *attr, GFP_KERNEL);
Roland Dreier3cd96562006-09-22 15:22:46 -07002810 if (!attr) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002811 dev_err(&port_priv->device->dev,
2812 "Couldn't kmalloc ib_qp_attr\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 return -ENOMEM;
2814 }
2815
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002816 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2817 IB_DEFAULT_PKEY_FULL, &pkey_index);
2818 if (ret)
2819 pkey_index = 0;
2820
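	/*
	 * Walk the core MAD QPs (QP0 only exists on ports with SMI support)
	 * and take each one through the RESET -> INIT -> RTR -> RTS state
	 * sequence so it can send and receive.
	 */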
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2822 qp = port_priv->qp_info[i].qp;
Eli Cohenfac70d52010-09-27 17:51:11 -07002823 if (!qp)
2824 continue;
2825
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 /*
2827 * PKey index for QP1 is irrelevant but
2828 * one is needed for the Reset to Init transition
2829 */
2830 attr->qp_state = IB_QPS_INIT;
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002831 attr->pkey_index = pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2833 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2834 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2835 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002836 dev_err(&port_priv->device->dev,
2837 "Couldn't change QP%d state to INIT: %d\n",
2838 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 goto out;
2840 }
2841
2842 attr->qp_state = IB_QPS_RTR;
2843 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2844 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002845 dev_err(&port_priv->device->dev,
2846 "Couldn't change QP%d state to RTR: %d\n",
2847 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 goto out;
2849 }
2850
2851 attr->qp_state = IB_QPS_RTS;
2852 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2853 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2854 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002855 dev_err(&port_priv->device->dev,
2856 "Couldn't change QP%d state to RTS: %d\n",
2857 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 goto out;
2859 }
2860 }
2861
2862 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2863 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002864 dev_err(&port_priv->device->dev,
2865 "Failed to request completion notification: %d\n",
2866 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 goto out;
2868 }
2869
2870 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
Eli Cohenfac70d52010-09-27 17:51:11 -07002871 if (!port_priv->qp_info[i].qp)
2872 continue;
2873
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2875 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002876 dev_err(&port_priv->device->dev,
2877 "Couldn't post receive WRs\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 goto out;
2879 }
2880 }
2881out:
2882 kfree(attr);
2883 return ret;
2884}
2885
2886static void qp_event_handler(struct ib_event *event, void *qp_context)
2887{
2888 struct ib_mad_qp_info *qp_info = qp_context;
2889
2890 /* It's worse than that! He's dead, Jim! */
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002891 dev_err(&qp_info->port_priv->device->dev,
2892 "Fatal error (%d) on MAD QP (%d)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 event->event, qp_info->qp->qp_num);
2894}
2895
2896static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2897 struct ib_mad_queue *mad_queue)
2898{
2899 mad_queue->qp_info = qp_info;
2900 mad_queue->count = 0;
2901 spin_lock_init(&mad_queue->lock);
2902 INIT_LIST_HEAD(&mad_queue->list);
2903}
2904
2905static void init_mad_qp(struct ib_mad_port_private *port_priv,
2906 struct ib_mad_qp_info *qp_info)
2907{
2908 qp_info->port_priv = port_priv;
2909 init_mad_queue(qp_info, &qp_info->send_queue);
2910 init_mad_queue(qp_info, &qp_info->recv_queue);
2911 INIT_LIST_HEAD(&qp_info->overflow_list);
2912 spin_lock_init(&qp_info->snoop_lock);
2913 qp_info->snoop_table = NULL;
2914 qp_info->snoop_table_size = 0;
2915 atomic_set(&qp_info->snoop_count, 0);
2916}
2917
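/*
 * Create one of the two special MAD QPs.  Both QPs share the port's CQ,
 * and their work-request depths come from the send_queue_size and
 * recv_queue_size module parameters (mad_sendq_size / mad_recvq_size).
 *
 * A hedged example, assuming this file is still built as a standalone
 * ib_mad module on this kernel:
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=512
 */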
2918static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2919 enum ib_qp_type qp_type)
2920{
2921 struct ib_qp_init_attr qp_init_attr;
2922 int ret;
2923
2924 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2925 qp_init_attr.send_cq = qp_info->port_priv->cq;
2926 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2927 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07002928 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2929 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2931 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2932 qp_init_attr.qp_type = qp_type;
2933 qp_init_attr.port_num = qp_info->port_priv->port_num;
2934 qp_init_attr.qp_context = qp_info;
2935 qp_init_attr.event_handler = qp_event_handler;
2936 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2937 if (IS_ERR(qp_info->qp)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002938 dev_err(&qp_info->port_priv->device->dev,
2939 "Couldn't create ib_mad QP%d\n",
2940 get_spl_qp_index(qp_type));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 ret = PTR_ERR(qp_info->qp);
2942 goto error;
2943 }
2944	/* Use the configured queue sizes unless the CQ is resized */
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07002945 qp_info->send_queue.max_active = mad_sendq_size;
2946 qp_info->recv_queue.max_active = mad_recvq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 return 0;
2948
2949error:
2950 return ret;
2951}
2952
2953static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2954{
Eli Cohenfac70d52010-09-27 17:51:11 -07002955 if (!qp_info->qp)
2956 return;
2957
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 ib_destroy_qp(qp_info->qp);
Jesper Juhl6044ec82005-11-07 01:01:32 -08002959 kfree(qp_info->snoop_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960}
2961
2962/*
2963 * Open the port
2964 * Create the QP, PD, MR, and CQ if needed
2965 */
2966static int ib_mad_port_open(struct ib_device *device,
2967 int port_num)
2968{
2969 int ret, cq_size;
2970 struct ib_mad_port_private *port_priv;
2971 unsigned long flags;
2972 char name[sizeof "ib_mad123"];
Eli Cohenfac70d52010-09-27 17:51:11 -07002973 int has_smi;
Matan Barak8e372102015-06-11 16:35:21 +03002974 struct ib_cq_init_attr cq_attr = {};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975
Ira Weiny337877a2015-06-06 14:38:29 -04002976 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
2977 return -EFAULT;
2978
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 /* Create new device info */
Roland Dreierde6eb662005-11-02 07:23:14 -08002980 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981 if (!port_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002982 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 return -ENOMEM;
2984 }
Roland Dreierde6eb662005-11-02 07:23:14 -08002985
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 port_priv->device = device;
2987 port_priv->port_num = port_num;
2988 spin_lock_init(&port_priv->reg_lock);
2989 INIT_LIST_HEAD(&port_priv->agent_list);
2990 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2991 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2992
Eli Cohenfac70d52010-09-27 17:51:11 -07002993 cq_size = mad_sendq_size + mad_recvq_size;
Michael Wang29541e32015-05-05 14:50:33 +02002994 has_smi = rdma_cap_ib_smi(device, port_num);
Eli Cohenfac70d52010-09-27 17:51:11 -07002995 if (has_smi)
2996 cq_size *= 2;
2997
Matan Barak8e372102015-06-11 16:35:21 +03002998 cq_attr.cqe = cq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 port_priv->cq = ib_create_cq(port_priv->device,
Hal Rosenstock5dd2ce12005-08-15 14:16:36 -07003000 ib_mad_thread_completion_handler,
Matan Barak8e372102015-06-11 16:35:21 +03003001 NULL, port_priv, &cq_attr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 if (IS_ERR(port_priv->cq)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003003 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 ret = PTR_ERR(port_priv->cq);
3005 goto error3;
3006 }
3007
3008 port_priv->pd = ib_alloc_pd(device);
3009 if (IS_ERR(port_priv->pd)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003010 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 ret = PTR_ERR(port_priv->pd);
3012 goto error4;
3013 }
3014
3015 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
3016 if (IS_ERR(port_priv->mr)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003017 dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018 ret = PTR_ERR(port_priv->mr);
3019 goto error5;
3020 }
3021
Eli Cohenfac70d52010-09-27 17:51:11 -07003022 if (has_smi) {
3023 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3024 if (ret)
3025 goto error6;
3026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3028 if (ret)
3029 goto error7;
3030
3031 snprintf(name, sizeof name, "ib_mad%d", port_num);
3032 port_priv->wq = create_singlethread_workqueue(name);
3033 if (!port_priv->wq) {
3034 ret = -ENOMEM;
3035 goto error8;
3036 }
David Howellsc4028952006-11-22 14:57:56 +00003037 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003039 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3040 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3041 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3042
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 ret = ib_mad_port_start(port_priv);
3044 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003045 dev_err(&device->dev, "Couldn't start port\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 goto error9;
3047 }
3048
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 return 0;
3050
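/*
 * Error unwind: each label below releases only what was set up before the
 * failing step, in reverse order of creation.
 */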
3051error9:
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003052 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3053 list_del_init(&port_priv->port_list);
3054 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3055
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 destroy_workqueue(port_priv->wq);
3057error8:
3058 destroy_mad_qp(&port_priv->qp_info[1]);
3059error7:
3060 destroy_mad_qp(&port_priv->qp_info[0]);
3061error6:
3062 ib_dereg_mr(port_priv->mr);
3063error5:
3064 ib_dealloc_pd(port_priv->pd);
3065error4:
3066 ib_destroy_cq(port_priv->cq);
3067 cleanup_recv_queue(&port_priv->qp_info[1]);
3068 cleanup_recv_queue(&port_priv->qp_info[0]);
3069error3:
3070 kfree(port_priv);
3071
3072 return ret;
3073}
3074
3075/*
3076 * Close the port
3077 * If there are no classes using the port, free the port
3078 * resources (CQ, MR, PD, QP) and remove the port's info structure
3079 */
3080static int ib_mad_port_close(struct ib_device *device, int port_num)
3081{
3082 struct ib_mad_port_private *port_priv;
3083 unsigned long flags;
3084
3085 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3086 port_priv = __ib_get_mad_port(device, port_num);
3087 if (port_priv == NULL) {
3088 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003089 dev_err(&device->dev, "Port %d not found\n", port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 return -ENODEV;
3091 }
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003092 list_del_init(&port_priv->port_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3094
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 destroy_workqueue(port_priv->wq);
3096 destroy_mad_qp(&port_priv->qp_info[1]);
3097 destroy_mad_qp(&port_priv->qp_info[0]);
3098 ib_dereg_mr(port_priv->mr);
3099 ib_dealloc_pd(port_priv->pd);
3100 ib_destroy_cq(port_priv->cq);
3101 cleanup_recv_queue(&port_priv->qp_info[1]);
3102 cleanup_recv_queue(&port_priv->qp_info[0]);
3103 /* XXX: Handle deallocation of MAD registration tables */
3104
3105 kfree(port_priv);
3106
3107 return 0;
3108}
3109
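/*
 * Per-device setup run by the IB client framework.  Switch devices manage
 * all of their ports through port 0, so only that port is opened; for
 * other node types every physical port with MAD support
 * (rdma_cap_ib_mad()) gets a MAD port and an agent port.  On failure,
 * everything opened so far is torn down again.
 */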
3110static void ib_mad_init_device(struct ib_device *device)
3111{
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003112 int start, end, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113
Tom Tucker07ebafb2006-08-03 16:02:42 -05003114 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003115 start = 0;
3116 end = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 } else {
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003118 start = 1;
3119 end = device->phys_port_cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 }
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003121
3122 for (i = start; i <= end; i++) {
Michael Wangc757dea2015-05-05 14:50:32 +02003123 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003124 continue;
3125
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003126 if (ib_mad_port_open(device, i)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003127 dev_err(&device->dev, "Couldn't open port %d\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003128 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 }
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003130 if (ib_agent_port_open(device, i)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003131 dev_err(&device->dev,
3132 "Couldn't open port %d for agents\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003133 goto error_agent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134 }
3135 }
Hal Rosenstockf68bcc22005-07-27 11:45:27 -07003136 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003138error_agent:
3139 if (ib_mad_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003140 dev_err(&device->dev, "Couldn't close port %d\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003141
3142error:
Michael Wang827f2a82015-05-05 14:50:20 +02003143 while (--i >= start) {
Michael Wangc757dea2015-05-05 14:50:32 +02003144 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003145 continue;
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003146
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003147 if (ib_agent_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003148 dev_err(&device->dev,
3149 "Couldn't close port %d for agents\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003150 if (ib_mad_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003151 dev_err(&device->dev, "Couldn't close port %d\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153}
3154
3155static void ib_mad_remove_device(struct ib_device *device)
3156{
Michael Wang827f2a82015-05-05 14:50:20 +02003157 int start, end, i;
Steve Wise070e1402010-03-04 18:18:18 +00003158
Tom Tucker07ebafb2006-08-03 16:02:42 -05003159 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Michael Wang827f2a82015-05-05 14:50:20 +02003160 start = 0;
3161 end = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 } else {
Michael Wang827f2a82015-05-05 14:50:20 +02003163 start = 1;
3164 end = device->phys_port_cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 }
Michael Wang827f2a82015-05-05 14:50:20 +02003166
3167 for (i = start; i <= end; i++) {
Michael Wangc757dea2015-05-05 14:50:32 +02003168 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003169 continue;
3170
3171 if (ib_agent_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003172 dev_err(&device->dev,
Michael Wang827f2a82015-05-05 14:50:20 +02003173 "Couldn't close port %d for agents\n", i);
3174 if (ib_mad_port_close(device, i))
3175 dev_err(&device->dev, "Couldn't close port %d\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 }
3177}
3178
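/*
 * Client registered with the IB core: .add is invoked for every IB device
 * that exists at registration time or is added later, and .remove mirrors
 * that when a device goes away or the client unregisters.
 */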
3179static struct ib_client mad_client = {
3180 .name = "mad",
3181 .add = ib_mad_init_device,
3182 .remove = ib_mad_remove_device
3183};
3184
3185static int __init ib_mad_init_module(void)
3186{
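	/* Clamp the module-parameter queue depths to the supported range */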
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07003187 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3188 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3189
3190 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3191 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3192
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 INIT_LIST_HEAD(&ib_mad_port_list);
3194
3195 if (ib_register_client(&mad_client)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003196 pr_err("Couldn't register ib_mad client\n");
Ira Weinyc9082e52015-06-06 14:38:30 -04003197 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 }
3199
3200 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201}
3202
3203static void __exit ib_mad_cleanup_module(void)
3204{
3205 ib_unregister_client(&mad_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206}
3207
3208module_init(ib_mad_init_module);
3209module_exit(ib_mad_cleanup_module);