/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

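/* Map a special QP type to its index in the per-port qp_info[] array */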
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

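/*
 * A MAD is a response if the R bit is set in its method, if it is a
 * TrapRepress, or, for the BM class, if the response bit is set in the
 * attribute modifier.
 */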
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

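/*
 * Illustrative registration (a sketch, not part of this file): a client
 * that wants to receive unsolicited Performance Management Get/Set MADs
 * on QP1 might do roughly the following.  The handler names, context and
 * class version below are assumptions, not taken from this file:
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class		= IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version	= 1,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */
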
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

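/*
 * Add a snoop agent to the QP's snoop table, growing the table by one
 * entry when no free slot exists.  Returns the table index on success or
 * a negative errno.
 */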
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

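/*
 * Fabricate the receive work completion used when a directed route SMP
 * is processed locally rather than being posted to the QP.
 */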
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

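/*
 * Receive buffer sizing helpers: mad_size is the largest MAD the port can
 * carry (256 bytes for IB, 2048 for OPA), and the DMA mapping also covers
 * the GRH that precedes the MAD data.
 */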
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

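/*
 * RMPP payloads are split into equal segments of (mad_size - hdr_len)
 * bytes; get_pad_size() returns the zero padding needed to fill the last
 * segment.  Worked example (illustrative numbers only): an IB SA MAD has
 * mad_size = 256 and hdr_len = IB_MGMT_SA_HDR = 56, so seg_size = 200;
 * data_len = 300 then needs pad = 200 - (300 % 200) = 100 bytes.
 */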
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

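/*
 * Illustrative send path (a sketch, not part of this file): "agent" and
 * "ah" below are assumed to come from the caller, and the header/data
 * lengths shown are just one valid choice:
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	... fill in msg->mad ...
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 *
 * On success the buffer is freed from the client's send handler with
 * ib_free_send_mad() once the send completes.
 */
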
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

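/*
 * Map the MAD header and payload for DMA and post the work request.  If
 * the send queue is already at max_active, the request is parked on the
 * QP's overflow_list and posted once an outstanding send completes.
 */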
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001142int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143{
1144 struct ib_mad_qp_info *qp_info;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001145 struct list_head *list;
Sean Hefty34816ad2005-10-25 10:51:39 -07001146 struct ib_send_wr *bad_send_wr;
1147 struct ib_mad_agent *mad_agent;
1148 struct ib_sge *sge;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 unsigned long flags;
1150 int ret;
1151
Hal Rosenstockf8197a42005-07-27 11:45:24 -07001152 /* Set WR ID to find mad_send_wr upon completion */
Hal Rosenstockd760ce82005-07-27 11:45:25 -07001153 qp_info = mad_send_wr->mad_agent_priv->qp_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08001155 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1156 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157
Sean Hefty34816ad2005-10-25 10:51:39 -07001158 mad_agent = mad_send_wr->send_buf.mad_agent;
1159 sge = mad_send_wr->sg_list;
Ralph Campbell15271062006-12-12 14:28:30 -08001160 sge[0].addr = ib_dma_map_single(mad_agent->device,
1161 mad_send_wr->send_buf.mad,
1162 sge[0].length,
1163 DMA_TO_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02001164 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1165 return -ENOMEM;
1166
Ralph Campbell15271062006-12-12 14:28:30 -08001167 mad_send_wr->header_mapping = sge[0].addr;
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001168
Ralph Campbell15271062006-12-12 14:28:30 -08001169 sge[1].addr = ib_dma_map_single(mad_agent->device,
1170 ib_get_payload(mad_send_wr),
1171 sge[1].length,
1172 DMA_TO_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02001173 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1174 ib_dma_unmap_single(mad_agent->device,
1175 mad_send_wr->header_mapping,
1176 sge[0].length, DMA_TO_DEVICE);
1177 return -ENOMEM;
1178 }
Ralph Campbell15271062006-12-12 14:28:30 -08001179 mad_send_wr->payload_mapping = sge[1].addr;
Sean Hefty34816ad2005-10-25 10:51:39 -07001180
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001182 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001183 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
Sean Hefty34816ad2005-10-25 10:51:39 -07001184 &bad_send_wr);
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001185 list = &qp_info->send_queue.list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 ret = 0;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001188 list = &qp_info->overflow_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 }
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07001190
1191 if (!ret) {
1192 qp_info->send_queue.count++;
1193 list_add_tail(&mad_send_wr->mad_list.list, list);
1194 }
1195 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001196 if (ret) {
Ralph Campbell15271062006-12-12 14:28:30 -08001197 ib_dma_unmap_single(mad_agent->device,
1198 mad_send_wr->header_mapping,
1199 sge[0].length, DMA_TO_DEVICE);
1200 ib_dma_unmap_single(mad_agent->device,
1201 mad_send_wr->payload_mapping,
1202 sge[1].length, DMA_TO_DEVICE);
Jack Morgensteinf36e1792006-03-03 21:54:13 -08001203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 return ret;
1205}
1206
1207/*
1208 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1209 * with the registered client
1210 */
Sean Hefty34816ad2005-10-25 10:51:39 -07001211int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1212 struct ib_mad_send_buf **bad_send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 struct ib_mad_agent_private *mad_agent_priv;
Sean Hefty34816ad2005-10-25 10:51:39 -07001215 struct ib_mad_send_buf *next_send_buf;
1216 struct ib_mad_send_wr_private *mad_send_wr;
1217 unsigned long flags;
1218 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
1220 /* Walk list of send WRs and post each on send list */
Sean Hefty34816ad2005-10-25 10:51:39 -07001221 for (; send_buf; send_buf = next_send_buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Sean Hefty34816ad2005-10-25 10:51:39 -07001223 mad_send_wr = container_of(send_buf,
1224 struct ib_mad_send_wr_private,
1225 send_buf);
1226 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227
Sean Hefty34816ad2005-10-25 10:51:39 -07001228 if (!send_buf->mad_agent->send_handler ||
1229 (send_buf->timeout_ms &&
1230 !send_buf->mad_agent->recv_handler)) {
1231 ret = -EINVAL;
1232 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 }
1234
Hal Rosenstock618a3c02006-03-28 16:40:04 -08001235 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1236 if (mad_agent_priv->agent.rmpp_version) {
1237 ret = -EINVAL;
1238 goto error;
1239 }
1240 }
1241
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 /*
1243 * Save pointer to next work request to post in case the
1244 * current one completes, and the user modifies the work
1245 * request associated with the completion
1246 */
Sean Hefty34816ad2005-10-25 10:51:39 -07001247 next_send_buf = send_buf->next;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001248 mad_send_wr->send_wr.ah = send_buf->ah;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249
Sean Hefty34816ad2005-10-25 10:51:39 -07001250 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1251 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1252 ret = handle_outgoing_dr_smp(mad_agent_priv,
1253 mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 if (ret < 0) /* error */
Sean Hefty34816ad2005-10-25 10:51:39 -07001255 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 else if (ret == 1) /* locally consumed */
Sean Hefty34816ad2005-10-25 10:51:39 -07001257 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 }
1259
Sean Hefty34816ad2005-10-25 10:51:39 -07001260 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 /* Timeout will be updated after send completes */
Sean Hefty34816ad2005-10-25 10:51:39 -07001262 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
Sean Hefty4fc8cd42007-11-27 00:11:04 -08001263 mad_send_wr->max_retries = send_buf->retries;
1264 mad_send_wr->retries_left = send_buf->retries;
1265 send_buf->retries = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07001266 /* Reference for work request to QP + response */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1268 mad_send_wr->status = IB_WC_SUCCESS;
1269
1270 /* Reference MAD agent until send completes */
1271 atomic_inc(&mad_agent_priv->refcount);
1272 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1273 list_add_tail(&mad_send_wr->agent_list,
1274 &mad_agent_priv->send_list);
1275 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1276
Ira Weiny1471cb62014-08-08 19:00:56 -04001277 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001278 ret = ib_send_rmpp_mad(mad_send_wr);
1279 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1280 ret = ib_send_mad(mad_send_wr);
1281 } else
1282 ret = ib_send_mad(mad_send_wr);
1283 if (ret < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 /* Fail send request */
1285 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1286 list_del(&mad_send_wr->agent_list);
1287 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1288 atomic_dec(&mad_agent_priv->refcount);
Sean Hefty34816ad2005-10-25 10:51:39 -07001289 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 }
1292 return 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07001293error:
1294 if (bad_send_buf)
1295 *bad_send_buf = send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 return ret;
1297}
1298EXPORT_SYMBOL(ib_post_send_mad);
1299
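/*
 * Illustrative sketch only, not part of the original file: how a caller
 * that has already built a send buffer might post it.  The function name
 * and the timeout/retry values are hypothetical; ib_post_send_mad() and
 * ib_free_send_mad() are the exported entry points being exercised.
 */
#if 0
static int example_post_request(struct ib_mad_send_buf *msg)
{
	struct ib_mad_send_buf *bad;
	int ret;

	msg->timeout_ms = 100;	/* wait up to 100 ms for a response */
	msg->retries = 3;	/* resend up to three times on timeout */

	ret = ib_post_send_mad(msg, &bad);
	if (ret)		/* on failure the caller still owns msg */
		ib_free_send_mad(msg);
	return ret;
}
#endif
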
1300/*
1301 * ib_free_recv_mad - Returns data buffers used to receive
1302 * a MAD to the access layer
1303 */
1304void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1305{
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001306 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 struct ib_mad_private_header *mad_priv_hdr;
1308 struct ib_mad_private *priv;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001309 struct list_head free_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001311 INIT_LIST_HEAD(&free_list);
1312 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001314 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1315 &free_list, list) {
1316 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1317 recv_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 mad_priv_hdr = container_of(mad_recv_wc,
1319 struct ib_mad_private_header,
1320 recv_wc);
1321 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1322 header);
Ira Weinyc9082e52015-06-06 14:38:30 -04001323 kfree(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325}
1326EXPORT_SYMBOL(ib_free_recv_mad);
1327
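/*
 * Illustrative sketch only, not part of the original file: a client
 * receive handler.  The handler name is hypothetical; the three-argument
 * signature and the duty to return the buffers with ib_free_recv_mad()
 * mirror the calls made from ib_mad_complete_recv() later in this file.
 */
#if 0
static void example_recv_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_send_buf *send_buf,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* ... inspect mad_recv_wc->recv_buf.mad here ... */

	/* hand the receive buffers back to the MAD layer */
	ib_free_recv_mad(mad_recv_wc);
}
#endif
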
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1329 u8 rmpp_version,
1330 ib_mad_send_handler send_handler,
1331 ib_mad_recv_handler recv_handler,
1332 void *context)
1333{
1334 return ERR_PTR(-EINVAL); /* XXX: for now */
1335}
1336EXPORT_SYMBOL(ib_redirect_mad_qp);
1337
1338int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1339 struct ib_wc *wc)
1340{
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001341 dev_err(&mad_agent->device->dev,
1342 "ib_process_mad_wc() not implemented yet\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 return 0;
1344}
1345EXPORT_SYMBOL(ib_process_mad_wc);
1346
1347static int method_in_use(struct ib_mad_mgmt_method_table **method,
1348 struct ib_mad_reg_req *mad_reg_req)
1349{
1350 int i;
1351
Akinobu Mita19b629f2010-03-05 13:41:38 -08001352 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 if ((*method)->agent[i]) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001354 pr_err("Method %d already in use\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 return -EINVAL;
1356 }
1357 }
1358 return 0;
1359}
1360
1361static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1362{
1363 /* Allocate management method table */
Roland Dreierde6eb662005-11-02 07:23:14 -08001364 *method = kzalloc(sizeof **method, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 if (!*method) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001366 pr_err("No memory for ib_mad_mgmt_method_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 return -ENOMEM;
1368 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
1370 return 0;
1371}
1372
1373/*
1374 * Check to see if there are any methods still in use
1375 */
1376static int check_method_table(struct ib_mad_mgmt_method_table *method)
1377{
1378 int i;
1379
1380 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1381 if (method->agent[i])
1382 return 1;
1383 return 0;
1384}
1385
1386/*
1387 * Check to see if there are any method tables for this class still in use
1388 */
1389static int check_class_table(struct ib_mad_mgmt_class_table *class)
1390{
1391 int i;
1392
1393 for (i = 0; i < MAX_MGMT_CLASS; i++)
1394 if (class->method_table[i])
1395 return 1;
1396 return 0;
1397}
1398
1399static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1400{
1401 int i;
1402
1403 for (i = 0; i < MAX_MGMT_OUI; i++)
1404 if (vendor_class->method_table[i])
1405 return 1;
1406 return 0;
1407}
1408
1409static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
Ira Weinyd94bd262015-06-06 14:38:22 -04001410 const char *oui)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411{
1412 int i;
1413
1414 for (i = 0; i < MAX_MGMT_OUI; i++)
Roland Dreier3cd96562006-09-22 15:22:46 -07001415		/* Is there a matching OUI for this vendor class? */
1416 if (!memcmp(vendor_class->oui[i], oui, 3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 return i;
1418
1419 return -1;
1420}
1421
1422static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1423{
1424 int i;
1425
1426 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1427 if (vendor->vendor_class[i])
1428 return 1;
1429
1430 return 0;
1431}
1432
1433static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1434 struct ib_mad_agent_private *agent)
1435{
1436 int i;
1437
1438 /* Remove any methods for this mad agent */
1439 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1440 if (method->agent[i] == agent) {
1441 method->agent[i] = NULL;
1442 }
1443 }
1444}
1445
1446static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1447 struct ib_mad_agent_private *agent_priv,
1448 u8 mgmt_class)
1449{
1450 struct ib_mad_port_private *port_priv;
1451 struct ib_mad_mgmt_class_table **class;
1452 struct ib_mad_mgmt_method_table **method;
1453 int i, ret;
1454
1455 port_priv = agent_priv->qp_info->port_priv;
1456 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1457 if (!*class) {
1458 /* Allocate management class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001459 *class = kzalloc(sizeof **class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 if (!*class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001461 dev_err(&agent_priv->agent.device->dev,
1462 "No memory for ib_mad_mgmt_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 ret = -ENOMEM;
1464 goto error1;
1465 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001466
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 /* Allocate method table for this management class */
1468 method = &(*class)->method_table[mgmt_class];
1469 if ((ret = allocate_method_table(method)))
1470 goto error2;
1471 } else {
1472 method = &(*class)->method_table[mgmt_class];
1473 if (!*method) {
1474 /* Allocate method table for this management class */
1475 if ((ret = allocate_method_table(method)))
1476 goto error1;
1477 }
1478 }
1479
1480 /* Now, make sure methods are not already in use */
1481 if (method_in_use(method, mad_reg_req))
1482 goto error3;
1483
1484 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001485 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001487
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 return 0;
1489
1490error3:
1491 /* Remove any methods for this mad agent */
1492 remove_methods_mad_agent(*method, agent_priv);
1493 /* Now, check to see if there are any methods in use */
1494 if (!check_method_table(*method)) {
1495 /* If not, release management method table */
1496 kfree(*method);
1497 *method = NULL;
1498 }
1499 ret = -EINVAL;
1500 goto error1;
1501error2:
1502 kfree(*class);
1503 *class = NULL;
1504error1:
1505 return ret;
1506}
1507
1508static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1509 struct ib_mad_agent_private *agent_priv)
1510{
1511 struct ib_mad_port_private *port_priv;
1512 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1513 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1514 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1515 struct ib_mad_mgmt_method_table **method;
1516 int i, ret = -ENOMEM;
1517 u8 vclass;
1518
1519 /* "New" vendor (with OUI) class */
1520 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1521 port_priv = agent_priv->qp_info->port_priv;
1522 vendor_table = &port_priv->version[
1523 mad_reg_req->mgmt_class_version].vendor;
1524 if (!*vendor_table) {
1525 /* Allocate mgmt vendor class table for "new" class version */
Roland Dreierde6eb662005-11-02 07:23:14 -08001526 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 if (!vendor) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001528 dev_err(&agent_priv->agent.device->dev,
1529 "No memory for ib_mad_mgmt_vendor_class_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 goto error1;
1531 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001532
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 *vendor_table = vendor;
1534 }
1535 if (!(*vendor_table)->vendor_class[vclass]) {
1536 /* Allocate table for this management vendor class */
Roland Dreierde6eb662005-11-02 07:23:14 -08001537 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 if (!vendor_class) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001539 dev_err(&agent_priv->agent.device->dev,
1540 "No memory for ib_mad_mgmt_vendor_class\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 goto error2;
1542 }
Roland Dreierde6eb662005-11-02 07:23:14 -08001543
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 (*vendor_table)->vendor_class[vclass] = vendor_class;
1545 }
1546 for (i = 0; i < MAX_MGMT_OUI; i++) {
1547		/* Is there a matching OUI for this vendor class? */
1548 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1549 mad_reg_req->oui, 3)) {
1550 method = &(*vendor_table)->vendor_class[
1551 vclass]->method_table[i];
1552 BUG_ON(!*method);
1553 goto check_in_use;
1554 }
1555 }
1556 for (i = 0; i < MAX_MGMT_OUI; i++) {
1557		/* Is an OUI slot available? */
1558 if (!is_vendor_oui((*vendor_table)->vendor_class[
1559 vclass]->oui[i])) {
1560 method = &(*vendor_table)->vendor_class[
1561 vclass]->method_table[i];
1562 BUG_ON(*method);
1563 /* Allocate method table for this OUI */
1564 if ((ret = allocate_method_table(method)))
1565 goto error3;
1566 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1567 mad_reg_req->oui, 3);
1568 goto check_in_use;
1569 }
1570 }
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001571 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 goto error3;
1573
1574check_in_use:
1575 /* Now, make sure methods are not already in use */
1576 if (method_in_use(method, mad_reg_req))
1577 goto error4;
1578
1579 /* Finally, add in methods being registered */
Akinobu Mita19b629f2010-03-05 13:41:38 -08001580 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 (*method)->agent[i] = agent_priv;
Akinobu Mita19b629f2010-03-05 13:41:38 -08001582
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 return 0;
1584
1585error4:
1586 /* Remove any methods for this mad agent */
1587 remove_methods_mad_agent(*method, agent_priv);
1588 /* Now, check to see if there are any methods in use */
1589 if (!check_method_table(*method)) {
1590 /* If not, release management method table */
1591 kfree(*method);
1592 *method = NULL;
1593 }
1594 ret = -EINVAL;
1595error3:
1596 if (vendor_class) {
1597 (*vendor_table)->vendor_class[vclass] = NULL;
1598 kfree(vendor_class);
1599 }
1600error2:
1601 if (vendor) {
1602 *vendor_table = NULL;
1603 kfree(vendor);
1604 }
1605error1:
1606 return ret;
1607}
1608
1609static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1610{
1611 struct ib_mad_port_private *port_priv;
1612 struct ib_mad_mgmt_class_table *class;
1613 struct ib_mad_mgmt_method_table *method;
1614 struct ib_mad_mgmt_vendor_class_table *vendor;
1615 struct ib_mad_mgmt_vendor_class *vendor_class;
1616 int index;
1617 u8 mgmt_class;
1618
1619 /*
1620	 * Was a MAD registration request supplied
1621	 * with the original registration?
1622 */
1623 if (!agent_priv->reg_req) {
1624 goto out;
1625 }
1626
1627 port_priv = agent_priv->qp_info->port_priv;
1628 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1629 class = port_priv->version[
1630 agent_priv->reg_req->mgmt_class_version].class;
1631 if (!class)
1632 goto vendor_check;
1633
1634 method = class->method_table[mgmt_class];
1635 if (method) {
1636 /* Remove any methods for this mad agent */
1637 remove_methods_mad_agent(method, agent_priv);
1638 /* Now, check to see if there are any methods still in use */
1639 if (!check_method_table(method)) {
1640 /* If not, release management method table */
Bart Van Assche2190d102016-06-03 12:08:44 -07001641 kfree(method);
1642 class->method_table[mgmt_class] = NULL;
1643			/* Any management classes left? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 if (!check_class_table(class)) {
1645 /* If not, release management class table */
1646 kfree(class);
1647 port_priv->version[
1648 agent_priv->reg_req->
1649 mgmt_class_version].class = NULL;
1650 }
1651 }
1652 }
1653
1654vendor_check:
1655 if (!is_vendor_class(mgmt_class))
1656 goto out;
1657
1658 /* normalize mgmt_class to vendor range 2 */
1659 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1660 vendor = port_priv->version[
1661 agent_priv->reg_req->mgmt_class_version].vendor;
1662
1663 if (!vendor)
1664 goto out;
1665
1666 vendor_class = vendor->vendor_class[mgmt_class];
1667 if (vendor_class) {
1668 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1669 if (index < 0)
1670 goto out;
1671 method = vendor_class->method_table[index];
1672 if (method) {
1673 /* Remove any methods for this mad agent */
1674 remove_methods_mad_agent(method, agent_priv);
1675 /*
1676 * Now, check to see if there are
1677 * any methods still in use
1678 */
1679 if (!check_method_table(method)) {
1680 /* If not, release management method table */
1681 kfree(method);
1682 vendor_class->method_table[index] = NULL;
1683 memset(vendor_class->oui[index], 0, 3);
1684				/* Any OUIs left? */
1685 if (!check_vendor_class(vendor_class)) {
1686 /* If not, release vendor class table */
1687 kfree(vendor_class);
1688 vendor->vendor_class[mgmt_class] = NULL;
1689					/* Any other vendor classes left? */
1690 if (!check_vendor_table(vendor)) {
1691 kfree(vendor);
1692 port_priv->version[
1693 agent_priv->reg_req->
1694 mgmt_class_version].
1695 vendor = NULL;
1696 }
1697 }
1698 }
1699 }
1700 }
1701
1702out:
1703 return;
1704}
1705
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706static struct ib_mad_agent_private *
1707find_mad_agent(struct ib_mad_port_private *port_priv,
Ira Weinyd94bd262015-06-06 14:38:22 -04001708 const struct ib_mad_hdr *mad_hdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709{
1710 struct ib_mad_agent_private *mad_agent = NULL;
1711 unsigned long flags;
1712
1713 spin_lock_irqsave(&port_priv->reg_lock, flags);
Ira Weinyd94bd262015-06-06 14:38:22 -04001714 if (ib_response_mad(mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 u32 hi_tid;
1716 struct ib_mad_agent_private *entry;
1717
1718 /*
1719 * Routing is based on high 32 bits of transaction ID
1720 * of MAD.
1721 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001722 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
Sean Hefty34816ad2005-10-25 10:51:39 -07001723 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 if (entry->agent.hi_tid == hi_tid) {
1725 mad_agent = entry;
1726 break;
1727 }
1728 }
1729 } else {
1730 struct ib_mad_mgmt_class_table *class;
1731 struct ib_mad_mgmt_method_table *method;
1732 struct ib_mad_mgmt_vendor_class_table *vendor;
1733 struct ib_mad_mgmt_vendor_class *vendor_class;
Ira Weinyd94bd262015-06-06 14:38:22 -04001734 const struct ib_vendor_mad *vendor_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 int index;
1736
1737 /*
1738 * Routing is based on version, class, and method
1739 * For "newer" vendor MADs, also based on OUI
1740 */
Ira Weinyd94bd262015-06-06 14:38:22 -04001741 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001743 if (!is_vendor_class(mad_hdr->mgmt_class)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 class = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001745 mad_hdr->class_version].class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 if (!class)
1747 goto out;
Ira Weinyd94bd262015-06-06 14:38:22 -04001748 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
Hefty, Seanb7ab0b12011-10-06 09:33:05 -07001749 IB_MGMT_MAX_METHODS)
1750 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 method = class->method_table[convert_mgmt_class(
Ira Weinyd94bd262015-06-06 14:38:22 -04001752 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 if (method)
Ira Weinyd94bd262015-06-06 14:38:22 -04001754 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 ~IB_MGMT_METHOD_RESP];
1756 } else {
1757 vendor = port_priv->version[
Ira Weinyd94bd262015-06-06 14:38:22 -04001758 mad_hdr->class_version].vendor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 if (!vendor)
1760 goto out;
1761 vendor_class = vendor->vendor_class[vendor_class_index(
Ira Weinyd94bd262015-06-06 14:38:22 -04001762 mad_hdr->mgmt_class)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 if (!vendor_class)
1764 goto out;
1765 /* Find matching OUI */
Ira Weinyd94bd262015-06-06 14:38:22 -04001766 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1768 if (index == -1)
1769 goto out;
1770 method = vendor_class->method_table[index];
1771 if (method) {
Ira Weinyd94bd262015-06-06 14:38:22 -04001772 mad_agent = method->agent[mad_hdr->method &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 ~IB_MGMT_METHOD_RESP];
1774 }
1775 }
1776 }
1777
1778 if (mad_agent) {
1779 if (mad_agent->agent.recv_handler)
1780 atomic_inc(&mad_agent->refcount);
1781 else {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04001782 dev_notice(&port_priv->device->dev,
1783 "No receive handler for client %p on port %d\n",
1784 &mad_agent->agent, port_priv->port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 mad_agent = NULL;
1786 }
1787 }
1788out:
1789 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1790
1791 return mad_agent;
1792}
1793
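/*
 * Basic sanity checks on an incoming MAD: the base version must be
 * understood, SMP classes must arrive on QP0, and all other classes on
 * the GSI QP.
 */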
Ira Weiny8e4349d2015-06-10 16:16:48 -04001794static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1795 const struct ib_mad_qp_info *qp_info,
1796 bool opa)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797{
1798 int valid = 0;
Ira Weiny8e4349d2015-06-10 16:16:48 -04001799 u32 qp_num = qp_info->qp->qp_num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
1801 /* Make sure MAD base version is understood */
Ira Weiny8e4349d2015-06-10 16:16:48 -04001802 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1803 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1804 pr_err("MAD received with unsupported base version %d %s\n",
1805 mad_hdr->base_version, opa ? "(opa)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 goto out;
1807 }
1808
1809 /* Filter SMI packets sent to other than QP0 */
Ira Weiny77f60832015-05-08 14:27:21 -04001810 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1811 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 if (qp_num == 0)
1813 valid = 1;
1814 } else {
Hal Rosenstock53370882015-11-13 15:22:22 -05001815 /* CM attributes other than ClassPortInfo only use Send method */
1816 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1817 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1818 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1819 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 /* Filter GSI packets sent to QP0 */
1821 if (qp_num != 0)
1822 valid = 1;
1823 }
1824
1825out:
1826 return valid;
1827}
1828
Ira Weinyf766c582015-05-08 14:27:24 -04001829static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1830 const struct ib_mad_hdr *mad_hdr)
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001831{
1832 struct ib_rmpp_mad *rmpp_mad;
1833
1834 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1835 return !mad_agent_priv->agent.rmpp_version ||
Ira Weiny1471cb62014-08-08 19:00:56 -04001836 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001837 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1838 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1839 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1840}
1841
Ira Weiny8bf4b302015-05-08 14:27:23 -04001842static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1843 const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001844{
Ira Weiny8bf4b302015-05-08 14:27:23 -04001845 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001846 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1847}
1848
Ira Weinyf766c582015-05-08 14:27:24 -04001849static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1850 const struct ib_mad_send_wr_private *wr,
1851				    const struct ib_mad_recv_wc *rwc)
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001852{
1853 struct ib_ah_attr attr;
1854 u8 send_resp, rcv_resp;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001855 union ib_gid sgid;
1856 struct ib_device *device = mad_agent_priv->agent.device;
1857 u8 port_num = mad_agent_priv->agent.port_num;
1858 u8 lmc;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001859
Ira Weiny96909302015-05-08 14:27:22 -04001860 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1861 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001862
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001863 if (send_resp == rcv_resp)
1864		/* both requests or both responses; assume GIDs differ */
1865 return 0;
1866
1867 if (ib_query_ah(wr->send_buf.ah, &attr))
1868 /* Assume not equal, to avoid false positives. */
1869 return 0;
1870
Jack Morgenstein9874e742006-06-17 20:37:34 -07001871 if (!!(attr.ah_flags & IB_AH_GRH) !=
1872 !!(rwc->wc->wc_flags & IB_WC_GRH))
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001873		/* one has a GID, the other does not; assume different */
1874 return 0;
Jack Morgenstein9874e742006-06-17 20:37:34 -07001875
1876 if (!send_resp && rcv_resp) {
1877		/* a request/response pair */
1878 if (!(attr.ah_flags & IB_AH_GRH)) {
1879 if (ib_get_cached_lmc(device, port_num, &lmc))
1880 return 0;
1881 return (!lmc || !((attr.src_path_bits ^
1882 rwc->wc->dlid_path_bits) &
1883 ((1 << lmc) - 1)));
1884 } else {
1885 if (ib_get_cached_gid(device, port_num,
Matan Barak55ee3ab2015-10-15 18:38:45 +03001886 attr.grh.sgid_index, &sgid, NULL))
Jack Morgenstein9874e742006-06-17 20:37:34 -07001887 return 0;
1888 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1889 16);
1890 }
1891 }
1892
1893 if (!(attr.ah_flags & IB_AH_GRH))
1894 return attr.dlid == rwc->wc->slid;
1895 else
1896 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1897 16);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001898}
Jack Morgenstein9874e742006-06-17 20:37:34 -07001899
1900static inline int is_direct(u8 class)
1901{
1902 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1903}
1904
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001905struct ib_mad_send_wr_private*
Ira Weinyf766c582015-05-08 14:27:24 -04001906ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1907 const struct ib_mad_recv_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908{
Jack Morgenstein9874e742006-06-17 20:37:34 -07001909 struct ib_mad_send_wr_private *wr;
Ira Weiny83a1d222015-06-06 14:38:23 -04001910 const struct ib_mad_hdr *mad_hdr;
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001911
Ira Weiny83a1d222015-06-06 14:38:23 -04001912 mad_hdr = &wc->recv_buf.mad->mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Jack Morgenstein9874e742006-06-17 20:37:34 -07001914 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
Ira Weiny83a1d222015-06-06 14:38:23 -04001915 if ((wr->tid == mad_hdr->tid) &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001916 rcv_has_same_class(wr, wc) &&
1917 /*
1918 * Don't check GID for direct routed MADs.
1919 * These might have permissive LIDs.
1920 */
Ira Weiny83a1d222015-06-06 14:38:23 -04001921 (is_direct(mad_hdr->mgmt_class) ||
Jack Morgenstein9874e742006-06-17 20:37:34 -07001922 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Roland Dreier39798692006-11-13 09:38:07 -08001923 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 }
1925
1926 /*
1927 * It's possible to receive the response before we've
1928 * been notified that the send has completed
1929 */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001930 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04001931 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
Ira Weiny83a1d222015-06-06 14:38:23 -04001932 wr->tid == mad_hdr->tid &&
Jack Morgenstein9874e742006-06-17 20:37:34 -07001933 wr->timeout &&
1934 rcv_has_same_class(wr, wc) &&
1935 /*
1936 * Don't check GID for direct routed MADs.
1937 * These might have permissive LIDs.
1938 */
Ira Weiny83a1d222015-06-06 14:38:23 -04001939 (is_direct(mad_hdr->mgmt_class) ||
Jack Morgenstein9874e742006-06-17 20:37:34 -07001940 rcv_has_same_gid(mad_agent_priv, wr, wc)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 /* Verify request has not been canceled */
Jack Morgenstein9874e742006-06-17 20:37:34 -07001942 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 }
1944 return NULL;
1945}
1946
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001947void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001948{
1949 mad_send_wr->timeout = 0;
Akinobu Mita179e0912006-06-26 00:24:41 -07001950 if (mad_send_wr->refcount == 1)
1951 list_move_tail(&mad_send_wr->agent_list,
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001952 &mad_send_wr->mad_agent_priv->done_list);
Hal Rosenstock6a0c4352005-07-27 11:45:26 -07001953}
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07001956 struct ib_mad_recv_wc *mad_recv_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957{
1958 struct ib_mad_send_wr_private *mad_send_wr;
1959 struct ib_mad_send_wc mad_send_wc;
1960 unsigned long flags;
1961
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001962 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1963 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
Ira Weiny1471cb62014-08-08 19:00:56 -04001964 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001965 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1966 mad_recv_wc);
1967 if (!mad_recv_wc) {
Sean Hefty1b52fa982006-05-12 14:57:52 -07001968 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07001969 return;
1970 }
1971 }
1972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 /* Complete corresponding request */
Ira Weiny96909302015-05-08 14:27:22 -04001974 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Jack Morgensteinfa9656b2006-03-28 16:39:07 -08001976 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 if (!mad_send_wr) {
1978 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04001979 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1980 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1981 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1982 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1983 /* user rmpp is in effect
1984 * and this is an active RMPP MAD
1985 */
Christoph Hellwigca281262016-01-04 14:15:58 +01001986 mad_agent_priv->agent.recv_handler(
1987 &mad_agent_priv->agent, NULL,
1988 mad_recv_wc);
Ira Weiny1471cb62014-08-08 19:00:56 -04001989 atomic_dec(&mad_agent_priv->refcount);
1990 } else {
1991 /* not user rmpp, revert to normal behavior and
1992 * drop the mad */
1993 ib_free_recv_mad(mad_recv_wc);
1994 deref_mad_agent(mad_agent_priv);
1995 return;
1996 }
1997 } else {
1998 ib_mark_mad_done(mad_send_wr);
1999 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2000
2001 /* Defined behavior is to complete response before request */
Christoph Hellwigca281262016-01-04 14:15:58 +01002002 mad_agent_priv->agent.recv_handler(
2003 &mad_agent_priv->agent,
2004 &mad_send_wr->send_buf,
2005 mad_recv_wc);
Ira Weiny1471cb62014-08-08 19:00:56 -04002006 atomic_dec(&mad_agent_priv->refcount);
2007
2008 mad_send_wc.status = IB_WC_SUCCESS;
2009 mad_send_wc.vendor_err = 0;
2010 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2011 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 } else {
Christoph Hellwigca281262016-01-04 14:15:58 +01002014 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002015 mad_recv_wc);
Sean Hefty1b52fa982006-05-12 14:57:52 -07002016 deref_mad_agent(mad_agent_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 }
2018}
2019
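/*
 * Handle a directed-route SMP received on an IB port: accept it for local
 * processing, discard it, or (on a switch) forward it out the right port.
 */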
Ira Weinye11ae8a2015-06-06 14:38:24 -04002020static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2021 const struct ib_mad_qp_info *qp_info,
2022 const struct ib_wc *wc,
2023 int port_num,
2024 struct ib_mad_private *recv,
2025 struct ib_mad_private *response)
2026{
2027 enum smi_forward_action retsmi;
Ira Weinyc9082e52015-06-06 14:38:30 -04002028 struct ib_smp *smp = (struct ib_smp *)recv->mad;
Ira Weinye11ae8a2015-06-06 14:38:24 -04002029
Ira Weinyc9082e52015-06-06 14:38:30 -04002030 if (smi_handle_dr_smp_recv(smp,
Hal Rosenstock41390322015-06-29 09:57:00 -04002031 rdma_cap_ib_switch(port_priv->device),
Ira Weinye11ae8a2015-06-06 14:38:24 -04002032 port_num,
2033 port_priv->device->phys_port_cnt) ==
2034 IB_SMI_DISCARD)
2035 return IB_SMI_DISCARD;
2036
Ira Weinyc9082e52015-06-06 14:38:30 -04002037 retsmi = smi_check_forward_dr_smp(smp);
Ira Weinye11ae8a2015-06-06 14:38:24 -04002038 if (retsmi == IB_SMI_LOCAL)
2039 return IB_SMI_HANDLE;
2040
2041 if (retsmi == IB_SMI_SEND) { /* don't forward */
Ira Weinyc9082e52015-06-06 14:38:30 -04002042 if (smi_handle_dr_smp_send(smp,
Hal Rosenstock41390322015-06-29 09:57:00 -04002043 rdma_cap_ib_switch(port_priv->device),
Ira Weinye11ae8a2015-06-06 14:38:24 -04002044 port_num) == IB_SMI_DISCARD)
2045 return IB_SMI_DISCARD;
2046
Ira Weinyc9082e52015-06-06 14:38:30 -04002047 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
Ira Weinye11ae8a2015-06-06 14:38:24 -04002048 return IB_SMI_DISCARD;
Hal Rosenstock41390322015-06-29 09:57:00 -04002049 } else if (rdma_cap_ib_switch(port_priv->device)) {
Ira Weinye11ae8a2015-06-06 14:38:24 -04002050 /* forward case for switches */
Ira Weinyc9082e52015-06-06 14:38:30 -04002051 memcpy(response, recv, mad_priv_size(response));
Ira Weinye11ae8a2015-06-06 14:38:24 -04002052 response->header.recv_wc.wc = &response->header.wc;
Ira Weinyc9082e52015-06-06 14:38:30 -04002053 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
Ira Weinye11ae8a2015-06-06 14:38:24 -04002054 response->header.recv_wc.recv_buf.grh = &response->grh;
2055
Ira Weinyc9082e52015-06-06 14:38:30 -04002056 agent_send_response((const struct ib_mad_hdr *)response->mad,
Ira Weinye11ae8a2015-06-06 14:38:24 -04002057 &response->grh, wc,
2058 port_priv->device,
Ira Weinyc9082e52015-06-06 14:38:30 -04002059 smi_get_fwd_port(smp),
2060 qp_info->qp->qp_num,
Ira Weiny8e4349d2015-06-10 16:16:48 -04002061 response->mad_size,
2062 false);
Ira Weinye11ae8a2015-06-06 14:38:24 -04002063
2064 return IB_SMI_DISCARD;
2065 }
2066 return IB_SMI_HANDLE;
2067}
2068
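/*
 * Build a GetResp with status "unsupported method/attribute" for a Get or
 * Set request that no agent claimed.  Returns true if a reply was built.
 */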
Ira Weinyc9082e52015-06-06 14:38:30 -04002069static bool generate_unmatched_resp(const struct ib_mad_private *recv,
Ira Weiny8e4349d2015-06-10 16:16:48 -04002070 struct ib_mad_private *response,
2071 size_t *resp_len, bool opa)
Swapna Thete0b307042012-02-25 17:47:32 -08002072{
Ira Weinyc9082e52015-06-06 14:38:30 -04002073 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2074 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2075
2076 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2077 recv_hdr->method == IB_MGMT_METHOD_SET) {
2078 memcpy(response, recv, mad_priv_size(response));
Swapna Thete0b307042012-02-25 17:47:32 -08002079 response->header.recv_wc.wc = &response->header.wc;
Ira Weinyc9082e52015-06-06 14:38:30 -04002080 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
Swapna Thete0b307042012-02-25 17:47:32 -08002081 response->header.recv_wc.recv_buf.grh = &response->grh;
Ira Weinyc9082e52015-06-06 14:38:30 -04002082 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2083 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2084 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2085 resp_hdr->status |= IB_SMP_DIRECTION;
Swapna Thete0b307042012-02-25 17:47:32 -08002086
Ira Weiny8e4349d2015-06-10 16:16:48 -04002087 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2088 if (recv_hdr->mgmt_class ==
2089 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2090 recv_hdr->mgmt_class ==
2091 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2092 *resp_len = opa_get_smp_header_size(
2093 (struct opa_smp *)recv->mad);
2094 else
2095 *resp_len = sizeof(struct ib_mad_hdr);
2096 }
2097
Swapna Thete0b307042012-02-25 17:47:32 -08002098 return true;
2099 } else {
2100 return false;
2101 }
2102}
Ira Weiny8e4349d2015-06-10 16:16:48 -04002103
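/*
 * OPA counterpart of handle_ib_smi(): the same directed-route SMP
 * handling, but for OPA-format SMPs.
 */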
2104static enum smi_action
2105handle_opa_smi(struct ib_mad_port_private *port_priv,
2106 struct ib_mad_qp_info *qp_info,
2107 struct ib_wc *wc,
2108 int port_num,
2109 struct ib_mad_private *recv,
2110 struct ib_mad_private *response)
2111{
2112 enum smi_forward_action retsmi;
2113 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2114
2115 if (opa_smi_handle_dr_smp_recv(smp,
Hal Rosenstock41390322015-06-29 09:57:00 -04002116 rdma_cap_ib_switch(port_priv->device),
Ira Weiny8e4349d2015-06-10 16:16:48 -04002117 port_num,
2118 port_priv->device->phys_port_cnt) ==
2119 IB_SMI_DISCARD)
2120 return IB_SMI_DISCARD;
2121
2122 retsmi = opa_smi_check_forward_dr_smp(smp);
2123 if (retsmi == IB_SMI_LOCAL)
2124 return IB_SMI_HANDLE;
2125
2126 if (retsmi == IB_SMI_SEND) { /* don't forward */
2127 if (opa_smi_handle_dr_smp_send(smp,
Hal Rosenstock41390322015-06-29 09:57:00 -04002128 rdma_cap_ib_switch(port_priv->device),
Ira Weiny8e4349d2015-06-10 16:16:48 -04002129 port_num) == IB_SMI_DISCARD)
2130 return IB_SMI_DISCARD;
2131
2132 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2133 IB_SMI_DISCARD)
2134 return IB_SMI_DISCARD;
2135
Hal Rosenstock41390322015-06-29 09:57:00 -04002136 } else if (rdma_cap_ib_switch(port_priv->device)) {
Ira Weiny8e4349d2015-06-10 16:16:48 -04002137 /* forward case for switches */
2138 memcpy(response, recv, mad_priv_size(response));
2139 response->header.recv_wc.wc = &response->header.wc;
2140 response->header.recv_wc.recv_buf.opa_mad =
2141 (struct opa_mad *)response->mad;
2142 response->header.recv_wc.recv_buf.grh = &response->grh;
2143
2144 agent_send_response((const struct ib_mad_hdr *)response->mad,
2145 &response->grh, wc,
2146 port_priv->device,
2147 opa_smi_get_fwd_port(smp),
2148 qp_info->qp->qp_num,
2149 recv->header.wc.byte_len,
2150 true);
2151
2152 return IB_SMI_DISCARD;
2153 }
2154
2155 return IB_SMI_HANDLE;
2156}
2157
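/*
 * Dispatch a received directed-route SMP to the OPA-aware or the plain IB
 * SMI handler, based on the MAD's base and class versions.
 */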
2158static enum smi_action
2159handle_smi(struct ib_mad_port_private *port_priv,
2160 struct ib_mad_qp_info *qp_info,
2161 struct ib_wc *wc,
2162 int port_num,
2163 struct ib_mad_private *recv,
2164 struct ib_mad_private *response,
2165 bool opa)
2166{
2167 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2168
2169 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
Hal Rosenstock9fa240b2016-10-18 13:20:29 -04002170 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
Ira Weiny8e4349d2015-06-10 16:16:48 -04002171 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2172 response);
2173
2174 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2175}
2176
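/*
 * Completion handler for the receive CQ: unmap the completed buffer,
 * validate the MAD, give the SMI code and the driver a chance to consume
 * or answer it, then hand it to the matching agent and repost a receive.
 */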
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002177static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178{
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002179 struct ib_mad_port_private *port_priv = cq->cq_context;
2180 struct ib_mad_list_head *mad_list =
2181 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 struct ib_mad_qp_info *qp_info;
2183 struct ib_mad_private_header *mad_priv_hdr;
Hal Rosenstock445d6802007-08-03 10:45:17 -07002184 struct ib_mad_private *recv, *response = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 struct ib_mad_agent_private *mad_agent;
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002186 int port_num;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002187 int ret = IB_MAD_RESULT_SUCCESS;
Ira Weiny4cd7c942015-06-06 14:38:31 -04002188 size_t mad_size;
2189 u16 resp_mad_pkey_index = 0;
Ira Weiny8e4349d2015-06-10 16:16:48 -04002190 bool opa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002192 if (list_empty_careful(&port_priv->port_list))
2193 return;
2194
2195 if (wc->status != IB_WC_SUCCESS) {
2196 /*
2197 * Receive errors indicate that the QP has entered the error
2198	 * state - error handling/shutdown code will clean up
2199 */
2200 return;
2201 }
2202
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 qp_info = mad_list->mad_queue->qp_info;
2204 dequeue_mad(mad_list);
2205
Ira Weiny8e4349d2015-06-10 16:16:48 -04002206 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2207 qp_info->port_priv->port_num);
2208
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2210 mad_list);
2211 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
Ralph Campbell15271062006-12-12 14:28:30 -08002212 ib_dma_unmap_single(port_priv->device,
2213 recv->header.mapping,
Ira Weinyc9082e52015-06-06 14:38:30 -04002214 mad_priv_dma_size(recv),
Ralph Campbell15271062006-12-12 14:28:30 -08002215 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
2217 /* Setup MAD receive work completion from "normal" work completion */
Sean Hefty24239af2005-04-16 15:26:08 -07002218 recv->header.wc = *wc;
2219 recv->header.recv_wc.wc = &recv->header.wc;
Ira Weiny8e4349d2015-06-10 16:16:48 -04002220
2221 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2222 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2223 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2224 } else {
2225 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2226 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2227 }
2228
Ira Weinyc9082e52015-06-06 14:38:30 -04002229 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2231
2232 if (atomic_read(&qp_info->snoop_count))
2233 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2234
2235 /* Validate MAD */
Ira Weiny8e4349d2015-06-10 16:16:48 -04002236 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 goto out;
2238
Ira Weiny4cd7c942015-06-06 14:38:31 -04002239 mad_size = recv->mad_size;
2240 response = alloc_mad_private(mad_size, GFP_KERNEL);
Hal Rosenstock445d6802007-08-03 10:45:17 -07002241 if (!response) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002242 dev_err(&port_priv->device->dev,
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002243 "%s: no memory for response buffer\n", __func__);
Hal Rosenstock445d6802007-08-03 10:45:17 -07002244 goto out;
2245 }
2246
Hal Rosenstock41390322015-06-29 09:57:00 -04002247 if (rdma_cap_ib_switch(port_priv->device))
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002248 port_num = wc->port_num;
2249 else
2250 port_num = port_priv->port_num;
2251
Ira Weinyc9082e52015-06-06 14:38:30 -04002252 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
Ira Weiny8e4349d2015-06-10 16:16:48 -04002254 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2255 response, opa)
Ira Weinye11ae8a2015-06-06 14:38:24 -04002256 == IB_SMI_DISCARD)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 }
2259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 /* Give driver "right of first refusal" on incoming MAD */
2261 if (port_priv->device->process_mad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 ret = port_priv->device->process_mad(port_priv->device, 0,
2263 port_priv->port_num,
2264 wc, &recv->grh,
Ira Weiny4cd7c942015-06-06 14:38:31 -04002265 (const struct ib_mad_hdr *)recv->mad,
2266 recv->mad_size,
2267 (struct ib_mad_hdr *)response->mad,
2268 &mad_size, &resp_mad_pkey_index);
Ira Weiny8e4349d2015-06-10 16:16:48 -04002269
2270 if (opa)
2271 wc->pkey_index = resp_mad_pkey_index;
2272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 if (ret & IB_MAD_RESULT_SUCCESS) {
2274 if (ret & IB_MAD_RESULT_CONSUMED)
2275 goto out;
2276 if (ret & IB_MAD_RESULT_REPLY) {
Ira Weinyc9082e52015-06-06 14:38:30 -04002277 agent_send_response((const struct ib_mad_hdr *)response->mad,
Sean Hefty34816ad2005-10-25 10:51:39 -07002278 &recv->grh, wc,
2279 port_priv->device,
Hal Rosenstock1bae4db2007-05-14 17:21:52 -04002280 port_num,
Ira Weinyc9082e52015-06-06 14:38:30 -04002281 qp_info->qp->qp_num,
Ira Weiny8e4349d2015-06-10 16:16:48 -04002282 mad_size, opa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 goto out;
2284 }
2285 }
2286 }
2287
Ira Weinyc9082e52015-06-06 14:38:30 -04002288 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 if (mad_agent) {
Hal Rosenstock4a0754f2005-07-27 11:45:24 -07002290 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 /*
2292	 * recv is freed in the error paths of ib_mad_complete_recv()
2293	 * or by the recv_handler that it invokes
2294 */
2295 recv = NULL;
Jack Morgensteina9e74322012-04-24 16:08:57 -07002296 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
Ira Weiny8e4349d2015-06-10 16:16:48 -04002297 generate_unmatched_resp(recv, response, &mad_size, opa)) {
Ira Weinyc9082e52015-06-06 14:38:30 -04002298 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2299 port_priv->device, port_num,
Ira Weiny8e4349d2015-06-10 16:16:48 -04002300 qp_info->qp->qp_num, mad_size, opa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 }
2302
2303out:
2304 /* Post another receive request for this QP */
2305 if (response) {
2306 ib_mad_post_receive_mads(qp_info, response);
Ira Weinyc9082e52015-06-06 14:38:30 -04002307 kfree(recv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 } else
2309 ib_mad_post_receive_mads(qp_info, recv);
2310}
2311
2312static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2313{
2314 struct ib_mad_send_wr_private *mad_send_wr;
2315 unsigned long delay;
2316
2317 if (list_empty(&mad_agent_priv->wait_list)) {
Tejun Heo136b5722012-08-21 13:18:24 -07002318 cancel_delayed_work(&mad_agent_priv->timed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 } else {
2320 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2321 struct ib_mad_send_wr_private,
2322 agent_list);
2323
2324 if (time_after(mad_agent_priv->timeout,
2325 mad_send_wr->timeout)) {
2326 mad_agent_priv->timeout = mad_send_wr->timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 delay = mad_send_wr->timeout - jiffies;
2328 if ((long)delay <= 0)
2329 delay = 1;
Tejun Heoe7c2f962012-08-21 13:18:24 -07002330 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2331 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 }
2333 }
2334}
2335
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002336static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337{
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002338 struct ib_mad_agent_private *mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 struct ib_mad_send_wr_private *temp_mad_send_wr;
2340 struct list_head *list_item;
2341 unsigned long delay;
2342
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002343 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 list_del(&mad_send_wr->agent_list);
2345
2346 delay = mad_send_wr->timeout;
2347 mad_send_wr->timeout += jiffies;
2348
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002349 if (delay) {
2350 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2351 temp_mad_send_wr = list_entry(list_item,
2352 struct ib_mad_send_wr_private,
2353 agent_list);
2354 if (time_after(mad_send_wr->timeout,
2355 temp_mad_send_wr->timeout))
2356 break;
2357 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 }
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002359 else
2360 list_item = &mad_agent_priv->wait_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 list_add(&mad_send_wr->agent_list, list_item);
2362
2363 /* Reschedule a work item if we have a shorter timeout */
Tejun Heoe7c2f962012-08-21 13:18:24 -07002364 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2365 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2366 &mad_agent_priv->timed_work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367}
2368
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002369void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2370 int timeout_ms)
2371{
2372 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2373 wait_for_response(mad_send_wr);
2374}
2375
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376/*
2377 * Process a send work completion
2378 */
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002379void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2380 struct ib_mad_send_wc *mad_send_wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381{
2382 struct ib_mad_agent_private *mad_agent_priv;
2383 unsigned long flags;
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002384 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002386 mad_agent_priv = mad_send_wr->mad_agent_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Ira Weiny1471cb62014-08-08 19:00:56 -04002388 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002389 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2390 if (ret == IB_RMPP_RESULT_CONSUMED)
2391 goto done;
2392 } else
2393 ret = IB_RMPP_RESULT_UNHANDLED;
2394
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 if (mad_send_wc->status != IB_WC_SUCCESS &&
2396 mad_send_wr->status == IB_WC_SUCCESS) {
2397 mad_send_wr->status = mad_send_wc->status;
2398 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2399 }
2400
2401 if (--mad_send_wr->refcount > 0) {
2402 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2403 mad_send_wr->status == IB_WC_SUCCESS) {
Hal Rosenstockd760ce82005-07-27 11:45:25 -07002404 wait_for_response(mad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 }
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002406 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 }
2408
2409 /* Remove send from MAD agent and notify client of completion */
2410 list_del(&mad_send_wr->agent_list);
2411 adjust_timeout(mad_agent_priv);
2412 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2413
2414	if (mad_send_wr->status != IB_WC_SUCCESS)
2415 mad_send_wc->status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002416 if (ret == IB_RMPP_RESULT_INTERNAL)
2417 ib_rmpp_send_handler(mad_send_wc);
2418 else
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002419 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2420 mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
2422 /* Release reference on agent taken when sending */
Sean Hefty1b52fa982006-05-12 14:57:52 -07002423 deref_mad_agent(mad_agent_priv);
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002424 return;
2425done:
2426 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427}
2428
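/*
 * Completion handler for the send CQ: unmap the finished send, report its
 * status to the owning agent, and post the next queued send, if any.
 */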
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002429static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430{
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002431 struct ib_mad_port_private *port_priv = cq->cq_context;
2432 struct ib_mad_list_head *mad_list =
2433 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 struct ib_mad_qp_info *qp_info;
2436 struct ib_mad_queue *send_queue;
2437 struct ib_send_wr *bad_send_wr;
Sean Hefty34816ad2005-10-25 10:51:39 -07002438 struct ib_mad_send_wc mad_send_wc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 unsigned long flags;
2440 int ret;
2441
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002442 if (list_empty_careful(&port_priv->port_list))
2443 return;
2444
2445 if (wc->status != IB_WC_SUCCESS) {
2446 if (!ib_mad_send_error(port_priv, wc))
2447 return;
2448 }
2449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2451 mad_list);
2452 send_queue = mad_list->mad_queue;
2453 qp_info = send_queue->qp_info;
2454
2455retry:
Ralph Campbell15271062006-12-12 14:28:30 -08002456 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2457 mad_send_wr->header_mapping,
2458 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2459 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2460 mad_send_wr->payload_mapping,
2461 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 queued_send_wr = NULL;
2463 spin_lock_irqsave(&send_queue->lock, flags);
2464 list_del(&mad_list->list);
2465
2466 /* Move queued send to the send queue */
2467 if (send_queue->count-- > send_queue->max_active) {
2468 mad_list = container_of(qp_info->overflow_list.next,
2469 struct ib_mad_list_head, list);
2470 queued_send_wr = container_of(mad_list,
2471 struct ib_mad_send_wr_private,
2472 mad_list);
Akinobu Mita179e0912006-06-26 00:24:41 -07002473 list_move_tail(&mad_list->list, &send_queue->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 }
2475 spin_unlock_irqrestore(&send_queue->lock, flags);
2476
Sean Hefty34816ad2005-10-25 10:51:39 -07002477 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2478 mad_send_wc.status = wc->status;
2479 mad_send_wc.vendor_err = wc->vendor_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 if (atomic_read(&qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002481 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 IB_MAD_SNOOP_SEND_COMPLETIONS);
Sean Hefty34816ad2005-10-25 10:51:39 -07002483 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
2485 if (queued_send_wr) {
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002486 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
Sean Hefty34816ad2005-10-25 10:51:39 -07002487 &bad_send_wr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002489 dev_err(&port_priv->device->dev,
2490 "ib_post_send failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 mad_send_wr = queued_send_wr;
2492 wc->status = IB_WC_LOC_QP_OP_ERR;
2493 goto retry;
2494 }
2495 }
2496}
2497
2498static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2499{
2500 struct ib_mad_send_wr_private *mad_send_wr;
2501 struct ib_mad_list_head *mad_list;
2502 unsigned long flags;
2503
2504 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2505 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2506 mad_send_wr = container_of(mad_list,
2507 struct ib_mad_send_wr_private,
2508 mad_list);
2509 mad_send_wr->retry = 1;
2510 }
2511 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2512}
2513
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002514static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2515 struct ib_wc *wc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516{
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002517 struct ib_mad_list_head *mad_list =
2518 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2519 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 struct ib_mad_send_wr_private *mad_send_wr;
2521 int ret;
2522
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 /*
2524 * Send errors will transition the QP to SQE - move
2525 * QP to RTS and repost flushed work requests
2526 */
2527 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2528 mad_list);
2529 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2530 if (mad_send_wr->retry) {
2531 /* Repost send */
2532 struct ib_send_wr *bad_send_wr;
2533
2534 mad_send_wr->retry = 0;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002535 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 &bad_send_wr);
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002537 if (!ret)
2538 return false;
2539 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 } else {
2541 struct ib_qp_attr *attr;
2542
2543 /* Transition QP to RTS and fail offending send */
2544 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2545 if (attr) {
2546 attr->qp_state = IB_QPS_RTS;
2547 attr->cur_qp_state = IB_QPS_SQE;
2548 ret = ib_modify_qp(qp_info->qp, attr,
2549 IB_QP_STATE | IB_QP_CUR_STATE);
2550 kfree(attr);
2551 if (ret)
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002552 dev_err(&port_priv->device->dev,
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002553 "%s - ib_modify_qp to RTS: %d\n",
2554 __func__, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 else
2556 mark_sends_for_retry(qp_info);
2557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002560 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561}
2562
2563static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2564{
2565 unsigned long flags;
2566 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2567 struct ib_mad_send_wc mad_send_wc;
2568 struct list_head cancel_list;
2569
2570 INIT_LIST_HEAD(&cancel_list);
2571
2572 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2573 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2574 &mad_agent_priv->send_list, agent_list) {
2575 if (mad_send_wr->status == IB_WC_SUCCESS) {
Roland Dreier3cd96562006-09-22 15:22:46 -07002576 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2578 }
2579 }
2580
2581 /* Empty wait list to prevent receives from finding a request */
2582 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2583 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2584
2585 /* Report all cancelled requests */
2586 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2587 mad_send_wc.vendor_err = 0;
2588
2589 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2590 &cancel_list, agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002591 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2592 list_del(&mad_send_wr->agent_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2594 &mad_send_wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 atomic_dec(&mad_agent_priv->refcount);
2596 }
2597}
2598
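/*
 * Map a caller-visible ib_mad_send_buf back to its ib_mad_send_wr_private:
 * look first on the wait list (requests awaiting a response), then on the
 * send list, where only RMPP data MADs are considered.  Must be called
 * with mad_agent_priv->lock held.
 */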
2599static struct ib_mad_send_wr_private*
Sean Hefty34816ad2005-10-25 10:51:39 -07002600find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2601 struct ib_mad_send_buf *send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602{
2603 struct ib_mad_send_wr_private *mad_send_wr;
2604
2605 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2606 agent_list) {
Sean Hefty34816ad2005-10-25 10:51:39 -07002607 if (&mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 return mad_send_wr;
2609 }
2610
2611 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2612 agent_list) {
Ira Weinyc597eee2015-05-08 13:10:03 -04002613 if (is_rmpp_data_mad(mad_agent_priv,
2614 mad_send_wr->send_buf.mad) &&
Sean Hefty34816ad2005-10-25 10:51:39 -07002615 &mad_send_wr->send_buf == send_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 return mad_send_wr;
2617 }
2618 return NULL;
2619}
2620
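/*
 * Re-arm or cancel an outstanding send.  A timeout_ms of 0 cancels the
 * request: it is marked IB_WC_WR_FLUSH_ERR and will complete through the
 * agent's send_handler.  A non-zero timeout_ms either updates the pending
 * timeout (if the send is still active on the QP) or resets the running
 * timer via ib_reset_mad_timeout().  Returns -EINVAL if the send has
 * already completed or cannot be found.
 */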
Sean Hefty34816ad2005-10-25 10:51:39 -07002621int ib_modify_mad(struct ib_mad_agent *mad_agent,
2622 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623{
2624 struct ib_mad_agent_private *mad_agent_priv;
2625 struct ib_mad_send_wr_private *mad_send_wr;
2626 unsigned long flags;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002627 int active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628
2629 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2630 agent);
2631 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Sean Hefty34816ad2005-10-25 10:51:39 -07002632 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002633 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002635 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 }
2637
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002638 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002639 if (!timeout_ms) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002641 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 }
2643
Sean Hefty34816ad2005-10-25 10:51:39 -07002644 mad_send_wr->send_buf.timeout_ms = timeout_ms;
Hal Rosenstockcabe3cb2005-07-27 11:45:33 -07002645 if (active)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002646 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2647 else
2648 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002650 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2651 return 0;
2652}
2653EXPORT_SYMBOL(ib_modify_mad);
2654
Sean Hefty34816ad2005-10-25 10:51:39 -07002655void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2656 struct ib_mad_send_buf *send_buf)
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002657{
Sean Hefty34816ad2005-10-25 10:51:39 -07002658 ib_modify_mad(mad_agent, send_buf, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659}
2660EXPORT_SYMBOL(ib_cancel_mad);
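
/*
 * Hedged usage sketch (not part of this file): a client that posted a
 * request and kept the ib_mad_send_buf handle around might re-arm or
 * abort it roughly like this; "agent" and "send_buf" are hypothetical
 * caller-owned variables.
 *
 *	if (ib_modify_mad(agent, send_buf, 2000))	// bump timeout to 2s
 *		pr_debug("request already completed\n");
 *	...
 *	ib_cancel_mad(agent, send_buf);			// give up on it
 *
 * ib_cancel_mad() is simply ib_modify_mad() with a timeout of 0.
 */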
2661
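/*
 * Work handler for MADs that were answered locally (e.g. SMPs addressed
 * to this port) and never went onto the wire.  For each queued entry a
 * synthetic work completion is built with build_smp_wc(), the response
 * is delivered to the receiving agent's recv_handler (with OPA-aware
 * MAD sizing), and the original send is then completed back to its
 * agent with IB_WC_SUCCESS.
 */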
David Howellsc4028952006-11-22 14:57:56 +00002662static void local_completions(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663{
2664 struct ib_mad_agent_private *mad_agent_priv;
2665 struct ib_mad_local_private *local;
2666 struct ib_mad_agent_private *recv_mad_agent;
2667 unsigned long flags;
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002668 int free_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 struct ib_wc wc;
2670 struct ib_mad_send_wc mad_send_wc;
Ira Weiny8e4349d2015-06-10 16:16:48 -04002671 bool opa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
David Howellsc4028952006-11-22 14:57:56 +00002673 mad_agent_priv =
2674 container_of(work, struct ib_mad_agent_private, local_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
Ira Weiny8e4349d2015-06-10 16:16:48 -04002676 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2677 mad_agent_priv->qp_info->port_priv->port_num);
2678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2680 while (!list_empty(&mad_agent_priv->local_list)) {
2681 local = list_entry(mad_agent_priv->local_list.next,
2682 struct ib_mad_local_private,
2683 completion_list);
Michael S. Tsirkin37289ef2006-03-30 15:52:54 +02002684 list_del(&local->completion_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002686 free_mad = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 if (local->mad_priv) {
Ira Weiny8e4349d2015-06-10 16:16:48 -04002688 u8 base_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 recv_mad_agent = local->recv_mad_agent;
2690 if (!recv_mad_agent) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002691 dev_err(&mad_agent_priv->agent.device->dev,
2692 "No receive MAD agent for local completion\n");
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002693 free_mad = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694 goto local_send_completion;
2695 }
2696
2697 /*
2698 * Defined behavior is to complete response
2699 * before request
2700 */
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +02002701 build_smp_wc(recv_mad_agent->agent.qp,
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002702 local->mad_send_wr->send_wr.wr.wr_cqe,
Sean Hefty97f52eb2005-08-13 21:05:57 -07002703 be16_to_cpu(IB_LID_PERMISSIVE),
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002704 local->mad_send_wr->send_wr.pkey_index,
Ira Weiny8e4349d2015-06-10 16:16:48 -04002705 recv_mad_agent->agent.port_num, &wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706
2707 local->mad_priv->header.recv_wc.wc = &wc;
Ira Weiny8e4349d2015-06-10 16:16:48 -04002708
2709 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2710 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2711 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2712 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2713 } else {
2714 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2715 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2716 }
2717
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002718 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2719 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2720 &local->mad_priv->header.recv_wc.rmpp_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2722 local->mad_priv->header.recv_wc.recv_buf.mad =
Ira Weinyc9082e52015-06-06 14:38:30 -04002723 (struct ib_mad *)local->mad_priv->mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2725 snoop_recv(recv_mad_agent->qp_info,
2726 &local->mad_priv->header.recv_wc,
2727 IB_MAD_SNOOP_RECVS);
2728 recv_mad_agent->agent.recv_handler(
2729 &recv_mad_agent->agent,
Christoph Hellwigca281262016-01-04 14:15:58 +01002730 &local->mad_send_wr->send_buf,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 &local->mad_priv->header.recv_wc);
2732 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2733 atomic_dec(&recv_mad_agent->refcount);
2734 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2735 }
2736
2737local_send_completion:
2738 /* Complete send */
2739 mad_send_wc.status = IB_WC_SUCCESS;
2740 mad_send_wc.vendor_err = 0;
Sean Hefty34816ad2005-10-25 10:51:39 -07002741 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
Sean Hefty34816ad2005-10-25 10:51:39 -07002743 snoop_send(mad_agent_priv->qp_info,
2744 &local->mad_send_wr->send_buf,
2745 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2747 &mad_send_wc);
2748
2749 spin_lock_irqsave(&mad_agent_priv->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 atomic_dec(&mad_agent_priv->refcount);
Ralph Campbell1d9bc6d62009-02-27 10:34:30 -08002751 if (free_mad)
Ira Weinyc9082e52015-06-06 14:38:30 -04002752 kfree(local->mad_priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 kfree(local);
2754 }
2755 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2756}
2757
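/*
 * Try to resend a request that timed out waiting for a response.
 * Consumes one retry, re-arms the timeout from send_buf.timeout_ms,
 * resends either through the RMPP path or directly with ib_send_mad(),
 * and puts the request back on the agent's send list on success.
 * Returns -ETIMEDOUT once retries_left reaches zero.
 */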
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002758static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2759{
2760 int ret;
2761
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002762 if (!mad_send_wr->retries_left)
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002763 return -ETIMEDOUT;
2764
Sean Hefty4fc8cd42007-11-27 00:11:04 -08002765 mad_send_wr->retries_left--;
2766 mad_send_wr->send_buf.retries++;
2767
Sean Hefty34816ad2005-10-25 10:51:39 -07002768 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002769
Ira Weiny1471cb62014-08-08 19:00:56 -04002770 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
Hal Rosenstockfa619a72005-07-27 11:45:37 -07002771 ret = ib_retry_rmpp(mad_send_wr);
2772 switch (ret) {
2773 case IB_RMPP_RESULT_UNHANDLED:
2774 ret = ib_send_mad(mad_send_wr);
2775 break;
2776 case IB_RMPP_RESULT_CONSUMED:
2777 ret = 0;
2778 break;
2779 default:
2780 ret = -ECOMM;
2781 break;
2782 }
2783 } else
2784 ret = ib_send_mad(mad_send_wr);
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002785
2786 if (!ret) {
2787 mad_send_wr->refcount++;
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002788 list_add_tail(&mad_send_wr->agent_list,
2789 &mad_send_wr->mad_agent_priv->send_list);
2790 }
2791 return ret;
2792}
2793
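/*
 * Delayed-work handler that expires requests on the agent's wait list.
 * The first entry whose timeout lies in the future re-arms the delayed
 * work and stops the scan; expired entries are retried via retry_send(),
 * and those with no retries left are completed with
 * IB_WC_RESP_TIMEOUT_ERR (or whatever status was already recorded).
 */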
David Howellsc4028952006-11-22 14:57:56 +00002794static void timeout_sends(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795{
2796 struct ib_mad_agent_private *mad_agent_priv;
2797 struct ib_mad_send_wr_private *mad_send_wr;
2798 struct ib_mad_send_wc mad_send_wc;
2799 unsigned long flags, delay;
2800
David Howellsc4028952006-11-22 14:57:56 +00002801 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2802 timed_work.work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 mad_send_wc.vendor_err = 0;
2804
2805 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2806 while (!list_empty(&mad_agent_priv->wait_list)) {
2807 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2808 struct ib_mad_send_wr_private,
2809 agent_list);
2810
2811 if (time_after(mad_send_wr->timeout, jiffies)) {
2812 delay = mad_send_wr->timeout - jiffies;
2813 if ((long)delay <= 0)
2814 delay = 1;
2815 queue_delayed_work(mad_agent_priv->qp_info->
2816 port_priv->wq,
2817 &mad_agent_priv->timed_work, delay);
2818 break;
2819 }
2820
Hal Rosenstockdbf92272005-07-27 11:45:30 -07002821 list_del(&mad_send_wr->agent_list);
Hal Rosenstock29bb33d2005-07-27 11:45:32 -07002822 if (mad_send_wr->status == IB_WC_SUCCESS &&
2823 !retry_send(mad_send_wr))
Hal Rosenstockf75b7a52005-07-27 11:45:29 -07002824 continue;
2825
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2827
Hal Rosenstock03b61ad2005-07-27 11:45:32 -07002828 if (mad_send_wr->status == IB_WC_SUCCESS)
2829 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2830 else
2831 mad_send_wc.status = mad_send_wr->status;
Sean Hefty34816ad2005-10-25 10:51:39 -07002832 mad_send_wc.send_buf = &mad_send_wr->send_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2834 &mad_send_wc);
2835
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 atomic_dec(&mad_agent_priv->refcount);
2837 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2838 }
2839 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2840}
2841
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842/*
2843 * Allocate receive MADs and post receive WRs for them
2844 */
2845static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2846 struct ib_mad_private *mad)
2847{
2848 unsigned long flags;
2849 int post, ret;
2850 struct ib_mad_private *mad_priv;
2851 struct ib_sge sg_list;
2852 struct ib_recv_wr recv_wr, *bad_recv_wr;
2853 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2854
2855 /* Initialize common scatter list fields */
Jason Gunthorpe4be90bc2015-07-30 17:22:16 -06002856 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
2858 /* Initialize common receive WR fields */
2859 recv_wr.next = NULL;
2860 recv_wr.sg_list = &sg_list;
2861 recv_wr.num_sge = 1;
2862
2863 do {
2864 /* Allocate and map receive buffer */
2865 if (mad) {
2866 mad_priv = mad;
2867 mad = NULL;
2868 } else {
Ira Weinyc9082e52015-06-06 14:38:30 -04002869 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2870 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 if (!mad_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002872 dev_err(&qp_info->port_priv->device->dev,
2873 "No memory for receive buffer\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 ret = -ENOMEM;
2875 break;
2876 }
2877 }
Ira Weinyc9082e52015-06-06 14:38:30 -04002878 sg_list.length = mad_priv_dma_size(mad_priv);
Ralph Campbell15271062006-12-12 14:28:30 -08002879 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2880 &mad_priv->grh,
Ira Weinyc9082e52015-06-06 14:38:30 -04002881 mad_priv_dma_size(mad_priv),
Ralph Campbell15271062006-12-12 14:28:30 -08002882 DMA_FROM_DEVICE);
Yan Burman2c34e682014-03-11 14:41:47 +02002883 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2884 sg_list.addr))) {
2885 ret = -ENOMEM;
2886 break;
2887 }
Ralph Campbell15271062006-12-12 14:28:30 -08002888 mad_priv->header.mapping = sg_list.addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 mad_priv->header.mad_list.mad_queue = recv_queue;
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08002890 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2891 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892
2893 /* Post receive WR */
2894 spin_lock_irqsave(&recv_queue->lock, flags);
2895 post = (++recv_queue->count < recv_queue->max_active);
2896 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2897 spin_unlock_irqrestore(&recv_queue->lock, flags);
2898 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2899 if (ret) {
2900 spin_lock_irqsave(&recv_queue->lock, flags);
2901 list_del(&mad_priv->header.mad_list.list);
2902 recv_queue->count--;
2903 spin_unlock_irqrestore(&recv_queue->lock, flags);
Ralph Campbell15271062006-12-12 14:28:30 -08002904 ib_dma_unmap_single(qp_info->port_priv->device,
2905 mad_priv->header.mapping,
Ira Weinyc9082e52015-06-06 14:38:30 -04002906 mad_priv_dma_size(mad_priv),
Ralph Campbell15271062006-12-12 14:28:30 -08002907 DMA_FROM_DEVICE);
Ira Weinyc9082e52015-06-06 14:38:30 -04002908 kfree(mad_priv);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002909 dev_err(&qp_info->port_priv->device->dev,
2910 "ib_post_recv failed: %d\n", ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 break;
2912 }
2913 } while (post);
2914
2915 return ret;
2916}
2917
2918/*
2919 * Return all the posted receive MADs
2920 */
2921static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2922{
2923 struct ib_mad_private_header *mad_priv_hdr;
2924 struct ib_mad_private *recv;
2925 struct ib_mad_list_head *mad_list;
2926
Eli Cohenfac70d52010-09-27 17:51:11 -07002927 if (!qp_info->qp)
2928 return;
2929
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 while (!list_empty(&qp_info->recv_queue.list)) {
2931
2932 mad_list = list_entry(qp_info->recv_queue.list.next,
2933 struct ib_mad_list_head, list);
2934 mad_priv_hdr = container_of(mad_list,
2935 struct ib_mad_private_header,
2936 mad_list);
2937 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2938 header);
2939
2940 /* Remove from posted receive MAD list */
2941 list_del(&mad_list->list);
2942
Ralph Campbell15271062006-12-12 14:28:30 -08002943 ib_dma_unmap_single(qp_info->port_priv->device,
2944 recv->header.mapping,
Ira Weinyc9082e52015-06-06 14:38:30 -04002945 mad_priv_dma_size(recv),
Ralph Campbell15271062006-12-12 14:28:30 -08002946 DMA_FROM_DEVICE);
Ira Weinyc9082e52015-06-06 14:38:30 -04002947 kfree(recv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 }
2949
2950 qp_info->recv_queue.count = 0;
2951}
2952
2953/*
2954 * Start the port
2955 */
2956static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2957{
2958 int ret, i;
2959 struct ib_qp_attr *attr;
2960 struct ib_qp *qp;
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002961 u16 pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962
2963 attr = kmalloc(sizeof *attr, GFP_KERNEL);
Roland Dreier3cd96562006-09-22 15:22:46 -07002964 if (!attr) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002965 dev_err(&port_priv->device->dev,
2966 "Couldn't kmalloc ib_qp_attr\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 return -ENOMEM;
2968 }
2969
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002970 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2971 IB_DEFAULT_PKEY_FULL, &pkey_index);
2972 if (ret)
2973 pkey_index = 0;
2974
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2976 qp = port_priv->qp_info[i].qp;
Eli Cohenfac70d52010-09-27 17:51:11 -07002977 if (!qp)
2978 continue;
2979
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 /*
2981 * PKey index for QP1 is irrelevant but
2982 * one is needed for the Reset to Init transition
2983 */
2984 attr->qp_state = IB_QPS_INIT;
Jack Morgensteinef5ed412013-07-18 14:02:29 +03002985 attr->pkey_index = pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2987 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2988 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2989 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002990 dev_err(&port_priv->device->dev,
2991 "Couldn't change QP%d state to INIT: %d\n",
2992 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 goto out;
2994 }
2995
2996 attr->qp_state = IB_QPS_RTR;
2997 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2998 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04002999 dev_err(&port_priv->device->dev,
3000 "Couldn't change QP%d state to RTR: %d\n",
3001 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 goto out;
3003 }
3004
3005 attr->qp_state = IB_QPS_RTS;
3006 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3007 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3008 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003009 dev_err(&port_priv->device->dev,
3010 "Couldn't change QP%d state to RTS: %d\n",
3011 i, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 goto out;
3013 }
3014 }
3015
3016 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3017 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003018 dev_err(&port_priv->device->dev,
3019 "Failed to request completion notification: %d\n",
3020 ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 goto out;
3022 }
3023
3024 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
Eli Cohenfac70d52010-09-27 17:51:11 -07003025 if (!port_priv->qp_info[i].qp)
3026 continue;
3027
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3029 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003030 dev_err(&port_priv->device->dev,
3031 "Couldn't post receive WRs\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 goto out;
3033 }
3034 }
3035out:
3036 kfree(attr);
3037 return ret;
3038}
3039
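/*
 * Asynchronous event handler for the special MAD QPs.  A fatal QP event
 * is only logged here; there is no recovery path for these QPs.
 */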
3040static void qp_event_handler(struct ib_event *event, void *qp_context)
3041{
3042 struct ib_mad_qp_info *qp_info = qp_context;
3043
3044 /* It's worse than that! He's dead, Jim! */
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003045 dev_err(&qp_info->port_priv->device->dev,
3046 "Fatal error (%d) on MAD QP (%d)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 event->event, qp_info->qp->qp_num);
3048}
3049
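/* Initialize the bookkeeping for one MAD work queue (send or receive). */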
3050static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3051 struct ib_mad_queue *mad_queue)
3052{
3053 mad_queue->qp_info = qp_info;
3054 mad_queue->count = 0;
3055 spin_lock_init(&mad_queue->lock);
3056 INIT_LIST_HEAD(&mad_queue->list);
3057}
3058
3059static void init_mad_qp(struct ib_mad_port_private *port_priv,
3060 struct ib_mad_qp_info *qp_info)
3061{
3062 qp_info->port_priv = port_priv;
3063 init_mad_queue(qp_info, &qp_info->send_queue);
3064 init_mad_queue(qp_info, &qp_info->recv_queue);
3065 INIT_LIST_HEAD(&qp_info->overflow_list);
3066 spin_lock_init(&qp_info->snoop_lock);
3067 qp_info->snoop_table = NULL;
3068 qp_info->snoop_table_size = 0;
3069 atomic_set(&qp_info->snoop_count, 0);
3070}
3071
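/*
 * Create one of the special QPs (SMI or GSI) on the port's shared CQ and
 * PD, sized according to the send/receive queue module parameters.
 */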
3072static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3073 enum ib_qp_type qp_type)
3074{
3075 struct ib_qp_init_attr qp_init_attr;
3076 int ret;
3077
3078 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3079 qp_init_attr.send_cq = qp_info->port_priv->cq;
3080 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3081 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07003082 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3083 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3085 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3086 qp_init_attr.qp_type = qp_type;
3087 qp_init_attr.port_num = qp_info->port_priv->port_num;
3088 qp_init_attr.qp_context = qp_info;
3089 qp_init_attr.event_handler = qp_event_handler;
3090 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3091 if (IS_ERR(qp_info->qp)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003092 dev_err(&qp_info->port_priv->device->dev,
3093 "Couldn't create ib_mad QP%d\n",
3094 get_spl_qp_index(qp_type));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 ret = PTR_ERR(qp_info->qp);
3096 goto error;
3097 }
3098 /* Use minimum queue sizes unless the CQ is resized */
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07003099 qp_info->send_queue.max_active = mad_sendq_size;
3100 qp_info->recv_queue.max_active = mad_recvq_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 return 0;
3102
3103error:
3104 return ret;
3105}
3106
3107static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3108{
Eli Cohenfac70d52010-09-27 17:51:11 -07003109 if (!qp_info->qp)
3110 return;
3111
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112 ib_destroy_qp(qp_info->qp);
Jesper Juhl6044ec82005-11-07 01:01:32 -08003113 kfree(qp_info->snoop_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114}
3115
3116/*
3117 * Open the port
3118 * Create the CQ, PD, QPs, and workqueue used to process MADs on this port
3119 */
3120static int ib_mad_port_open(struct ib_device *device,
3121 int port_num)
3122{
3123 int ret, cq_size;
3124 struct ib_mad_port_private *port_priv;
3125 unsigned long flags;
3126 char name[sizeof "ib_mad123"];
Eli Cohenfac70d52010-09-27 17:51:11 -07003127 int has_smi;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128
Ira Weiny337877a2015-06-06 14:38:29 -04003129 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3130 return -EFAULT;
3131
Ira Weiny548ead12015-06-06 14:38:33 -04003132 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3133 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3134 return -EFAULT;
3135
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136 /* Create new device info */
Roland Dreierde6eb662005-11-02 07:23:14 -08003137 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138 if (!port_priv) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003139 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 return -ENOMEM;
3141 }
Roland Dreierde6eb662005-11-02 07:23:14 -08003142
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 port_priv->device = device;
3144 port_priv->port_num = port_num;
3145 spin_lock_init(&port_priv->reg_lock);
3146 INIT_LIST_HEAD(&port_priv->agent_list);
3147 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3148 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3149
Eli Cohenfac70d52010-09-27 17:51:11 -07003150 cq_size = mad_sendq_size + mad_recvq_size;
Michael Wang29541e32015-05-05 14:50:33 +02003151 has_smi = rdma_cap_ib_smi(device, port_num);
Eli Cohenfac70d52010-09-27 17:51:11 -07003152 if (has_smi)
3153 cq_size *= 2;
3154
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08003155 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3156 IB_POLL_WORKQUEUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 if (IS_ERR(port_priv->cq)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003158 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 ret = PTR_ERR(port_priv->cq);
3160 goto error3;
3161 }
3162
Christoph Hellwiged082d32016-09-05 12:56:17 +02003163 port_priv->pd = ib_alloc_pd(device, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 if (IS_ERR(port_priv->pd)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003165 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 ret = PTR_ERR(port_priv->pd);
3167 goto error4;
3168 }
3169
Eli Cohenfac70d52010-09-27 17:51:11 -07003170 if (has_smi) {
3171 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3172 if (ret)
3173 goto error6;
3174 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3176 if (ret)
3177 goto error7;
3178
3179 snprintf(name, sizeof name, "ib_mad%d", port_num);
Bhaktipriya Shridhar1c99e292016-08-15 23:28:07 +05303180 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 if (!port_priv->wq) {
3182 ret = -ENOMEM;
3183 goto error8;
3184 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003186 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3187 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3188 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3189
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 ret = ib_mad_port_start(port_priv);
3191 if (ret) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003192 dev_err(&device->dev, "Couldn't start port\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 goto error9;
3194 }
3195
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196 return 0;
3197
3198error9:
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003199 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3200 list_del_init(&port_priv->port_list);
3201 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3202
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 destroy_workqueue(port_priv->wq);
3204error8:
3205 destroy_mad_qp(&port_priv->qp_info[1]);
3206error7:
3207 destroy_mad_qp(&port_priv->qp_info[0]);
3208error6:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 ib_dealloc_pd(port_priv->pd);
3210error4:
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08003211 ib_free_cq(port_priv->cq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 cleanup_recv_queue(&port_priv->qp_info[1]);
3213 cleanup_recv_queue(&port_priv->qp_info[0]);
3214error3:
3215 kfree(port_priv);
3216
3217 return ret;
3218}
3219
3220/*
3221 * Close the port
3222 * If there are no classes using the port, free the port
3223 * resources (workqueue, QPs, PD, CQ) and remove the port's info structure
3224 */
3225static int ib_mad_port_close(struct ib_device *device, int port_num)
3226{
3227 struct ib_mad_port_private *port_priv;
3228 unsigned long flags;
3229
3230 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3231 port_priv = __ib_get_mad_port(device, port_num);
3232 if (port_priv == NULL) {
3233 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003234 dev_err(&device->dev, "Port %d not found\n", port_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 return -ENODEV;
3236 }
Michael S. Tsirkindc059802006-03-20 10:08:25 -08003237 list_del_init(&port_priv->port_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3239
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 destroy_workqueue(port_priv->wq);
3241 destroy_mad_qp(&port_priv->qp_info[1]);
3242 destroy_mad_qp(&port_priv->qp_info[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 ib_dealloc_pd(port_priv->pd);
Christoph Hellwigd53e11f2016-01-05 22:46:12 -08003244 ib_free_cq(port_priv->cq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245 cleanup_recv_queue(&port_priv->qp_info[1]);
3246 cleanup_recv_queue(&port_priv->qp_info[0]);
3247 /* XXX: Handle deallocation of MAD registration tables */
3248
3249 kfree(port_priv);
3250
3251 return 0;
3252}
3253
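/*
 * ib_client "add" callback: open MAD and agent services on every port of
 * the device that supports MADs, unwinding the already-opened ports if
 * any of them fails.
 */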
3254static void ib_mad_init_device(struct ib_device *device)
3255{
Hal Rosenstock41390322015-06-29 09:57:00 -04003256 int start, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257
Hal Rosenstock41390322015-06-29 09:57:00 -04003258 start = rdma_start_port(device);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003259
Hal Rosenstock41390322015-06-29 09:57:00 -04003260 for (i = start; i <= rdma_end_port(device); i++) {
Michael Wangc757dea2015-05-05 14:50:32 +02003261 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003262 continue;
3263
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003264 if (ib_mad_port_open(device, i)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003265 dev_err(&device->dev, "Couldn't open port %d\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003266 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 }
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003268 if (ib_agent_port_open(device, i)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003269 dev_err(&device->dev,
3270 "Couldn't open port %d for agents\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003271 goto error_agent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 }
3273 }
Hal Rosenstockf68bcc22005-07-27 11:45:27 -07003274 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003276error_agent:
3277 if (ib_mad_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003278 dev_err(&device->dev, "Couldn't close port %d\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003279
3280error:
Michael Wang827f2a82015-05-05 14:50:20 +02003281 while (--i >= start) {
Michael Wangc757dea2015-05-05 14:50:32 +02003282 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003283 continue;
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003284
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003285 if (ib_agent_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003286 dev_err(&device->dev,
3287 "Couldn't close port %d for agents\n", i);
Roland Dreier4ab6fb72005-10-06 13:28:16 -07003288 if (ib_mad_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003289 dev_err(&device->dev, "Couldn't close port %d\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291}
3292
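/*
 * ib_client "remove" callback: tear down the agent and MAD port resources
 * for every MAD-capable port of the device.
 */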
Haggai Eran7c1eb452015-07-30 17:50:14 +03003293static void ib_mad_remove_device(struct ib_device *device, void *client_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294{
Hal Rosenstock41390322015-06-29 09:57:00 -04003295 int i;
Steve Wise070e1402010-03-04 18:18:18 +00003296
Hal Rosenstock41390322015-06-29 09:57:00 -04003297 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
Michael Wangc757dea2015-05-05 14:50:32 +02003298 if (!rdma_cap_ib_mad(device, i))
Michael Wang827f2a82015-05-05 14:50:20 +02003299 continue;
3300
3301 if (ib_agent_port_close(device, i))
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003302 dev_err(&device->dev,
Michael Wang827f2a82015-05-05 14:50:20 +02003303 "Couldn't close port %d for agents\n", i);
3304 if (ib_mad_port_close(device, i))
3305 dev_err(&device->dev, "Couldn't close port %d\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 }
3307}
3308
3309static struct ib_client mad_client = {
3310 .name = "mad",
3311 .add = ib_mad_init_device,
3312 .remove = ib_mad_remove_device
3313};
3314
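/*
 * Module init: clamp the queue-size module parameters to the supported
 * range and register the MAD client with the IB core so that every
 * existing and future device gets its ports set up.
 */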
Mark Bloch4c2cb422016-05-19 17:12:32 +03003315int ib_mad_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316{
Hal Rosenstockb76aabc2009-09-07 08:28:48 -07003317 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3318 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3319
3320 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3321 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 INIT_LIST_HEAD(&ib_mad_port_list);
3324
3325 if (ib_register_client(&mad_client)) {
Ira Weiny7ef5d4b2014-08-08 19:00:53 -04003326 pr_err("Couldn't register ib_mad client\n");
Ira Weinyc9082e52015-06-06 14:38:30 -04003327 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 }
3329
3330 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331}
3332
Mark Bloch4c2cb422016-05-19 17:12:32 +03003333void ib_mad_cleanup(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334{
3335 ib_unregister_client(&mad_client);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336}