/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

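/*
 * Usage sketch (not part of this file): a client's recv_handler can use
 * ib_response_mad() to tell a response apart from an unsolicited request.
 * complete_matching_send() and process_request() below are hypothetical
 * helpers.
 *
 *	if (ib_response_mad((struct ib_mad_hdr *)recv_wc->recv_buf.mad))
 *		complete_matching_send(recv_wc);
 *	else
 *		process_request(recv_wc);
 */
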
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: Invalid port %d\n",
			   port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/*
	 * Verify the QP requested is supported.  For example, Ethernet
	 * devices will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * does not overlap with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error5;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;
error5:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
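
/*
 * Usage sketch (assumed caller, not part of this file): register a GSI
 * agent for Performance Management GETs.  my_send_handler,
 * my_recv_handler and my_context are hypothetical names.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */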

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;
error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

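/*
 * Agent teardown pattern: each agent holds a refcount plus a completion.
 * The unregister path drops its own reference and then blocks in
 * wait_for_completion() until the final deref_*_agent() call signals
 * that all outstanding work has released the agent.
 */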
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

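/*
 * Synthesize the receive work completion that a directed route SMP would
 * have generated had it arrived on the wire; used when the SMP is
 * processed locally instead of being posted to the send queue.
 */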
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

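/*
 * Size helpers: alloc_mad_private() appends mad_size bytes of MAD payload
 * to the private header, while mad_priv_dma_size() gives the length of
 * the region that is DMA-mapped for receives (GRH followed by the MAD).
 */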
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

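/*
 * Worked example: for an IB MAD (mad_size 256) with hdr_len 24, the RMPP
 * segment size is 232.  data_len 200 then pads by 32 bytes so the final
 * segment is full, while data_len 232 needs no pad.
 */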
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
| 1081 | EXPORT_SYMBOL(ib_create_send_mad); |
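
/*
 * Sketch, for illustration only (not part of the driver): allocating a
 * single-segment SA send buffer with the helper above.  "agent",
 * "remote_qpn" and "pkey_index" are assumed to be supplied by the
 * caller; a real user would also fill in the MAD header and attach an
 * address handle before posting.
 */
static struct ib_mad_send_buf *example_alloc_sa_msg(struct ib_mad_agent *agent,
						    u32 remote_qpn,
						    u16 pkey_index)
{
	return ib_create_send_mad(agent, remote_qpn, pkey_index,
				  0 /* rmpp_active: single segment */,
				  IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
				  GFP_KERNEL, IB_MGMT_BASE_VERSION);
}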
| 1082 | |
Hal Rosenstock | 618a3c0 | 2006-03-28 16:40:04 -0800 | [diff] [blame] | 1083 | int ib_get_mad_data_offset(u8 mgmt_class) |
| 1084 | { |
| 1085 | if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) |
| 1086 | return IB_MGMT_SA_HDR; |
| 1087 | else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || |
| 1088 | (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || |
| 1089 | (mgmt_class == IB_MGMT_CLASS_BIS)) |
| 1090 | return IB_MGMT_DEVICE_HDR; |
| 1091 | else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && |
| 1092 | (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) |
| 1093 | return IB_MGMT_VENDOR_HDR; |
| 1094 | else |
| 1095 | return IB_MGMT_MAD_HDR; |
| 1096 | } |
| 1097 | EXPORT_SYMBOL(ib_get_mad_data_offset); |
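
/*
 * Sketch, for illustration only (not part of the driver): locating the
 * payload of a received MAD via the class-dependent header length
 * returned above.
 */
static void *example_mad_payload(struct ib_mad *mad)
{
	return (u8 *)mad + ib_get_mad_data_offset(mad->mad_hdr.mgmt_class);
}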
| 1098 | |
| 1099 | int ib_is_mad_class_rmpp(u8 mgmt_class) |
| 1100 | { |
| 1101 | if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || |
| 1102 | (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || |
| 1103 | (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || |
| 1104 | (mgmt_class == IB_MGMT_CLASS_BIS) || |
| 1105 | ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && |
| 1106 | (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) |
| 1107 | return 1; |
| 1108 | return 0; |
| 1109 | } |
| 1110 | EXPORT_SYMBOL(ib_is_mad_class_rmpp); |
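
/*
 * Sketch, for illustration only (not part of the driver): the kind of
 * sanity check a sender might perform before requesting an RMPP
 * transfer, mirroring the validation done in ib_post_send_mad() below.
 */
static int example_validate_rmpp_request(u8 mgmt_class, int rmpp_active)
{
	if (rmpp_active && !ib_is_mad_class_rmpp(mgmt_class))
		return -EINVAL;	/* this management class cannot carry RMPP */
	return 0;
}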
| 1111 | |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1112 | void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) |
| 1113 | { |
| 1114 | struct ib_mad_send_wr_private *mad_send_wr; |
| 1115 | struct list_head *list; |
| 1116 | |
| 1117 | mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, |
| 1118 | send_buf); |
| 1119 | list = &mad_send_wr->cur_seg->list; |
| 1120 | |
| 1121 | if (mad_send_wr->cur_seg->num < seg_num) { |
| 1122 | list_for_each_entry(mad_send_wr->cur_seg, list, list) |
| 1123 | if (mad_send_wr->cur_seg->num == seg_num) |
| 1124 | break; |
| 1125 | } else if (mad_send_wr->cur_seg->num > seg_num) { |
| 1126 | list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) |
| 1127 | if (mad_send_wr->cur_seg->num == seg_num) |
| 1128 | break; |
| 1129 | } |
| 1130 | return mad_send_wr->cur_seg->data; |
| 1131 | } |
| 1132 | EXPORT_SYMBOL(ib_get_rmpp_segment); |
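
/*
 * Sketch, for illustration only (not part of the driver): copying a
 * payload into the segments of an RMPP send buffer.  Segment numbering
 * starts at 1; seg_count and seg_size are filled in when the buffer is
 * created.  "src"/"len" are caller-supplied.
 */
static void example_fill_rmpp_payload(struct ib_mad_send_buf *msg,
				      const u8 *src, size_t len)
{
	int i;

	for (i = 1; i <= msg->seg_count && len; i++) {
		size_t n = min_t(size_t, len, msg->seg_size);

		memcpy(ib_get_rmpp_segment(msg, i), src, n);
		src += n;
		len -= n;
	}
}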
| 1133 | |
| 1134 | static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) |
| 1135 | { |
| 1136 | if (mad_send_wr->send_buf.seg_count) |
| 1137 | return ib_get_rmpp_segment(&mad_send_wr->send_buf, |
| 1138 | mad_send_wr->seg_num); |
| 1139 | else |
| 1140 | return mad_send_wr->send_buf.mad + |
| 1141 | mad_send_wr->send_buf.hdr_len; |
| 1142 | } |
| 1143 | |
Hal Rosenstock | 824c8ae | 2005-07-27 11:45:23 -0700 | [diff] [blame] | 1144 | void ib_free_send_mad(struct ib_mad_send_buf *send_buf) |
| 1145 | { |
| 1146 | struct ib_mad_agent_private *mad_agent_priv; |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1147 | struct ib_mad_send_wr_private *mad_send_wr; |
Hal Rosenstock | 824c8ae | 2005-07-27 11:45:23 -0700 | [diff] [blame] | 1148 | |
| 1149 | mad_agent_priv = container_of(send_buf->mad_agent, |
| 1150 | struct ib_mad_agent_private, agent); |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1151 | mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, |
| 1152 | send_buf); |
Hal Rosenstock | 824c8ae | 2005-07-27 11:45:23 -0700 | [diff] [blame] | 1153 | |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1154 | free_send_rmpp_list(mad_send_wr); |
| 1155 | kfree(send_buf->mad); |
Sean Hefty | 1b52fa98 | 2006-05-12 14:57:52 -0700 | [diff] [blame] | 1156 | deref_mad_agent(mad_agent_priv); |
Hal Rosenstock | 824c8ae | 2005-07-27 11:45:23 -0700 | [diff] [blame] | 1157 | } |
| 1158 | EXPORT_SYMBOL(ib_free_send_mad); |
| 1159 | |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1160 | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | { |
| 1162 | struct ib_mad_qp_info *qp_info; |
Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 1163 | struct list_head *list; |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1164 | struct ib_send_wr *bad_send_wr; |
| 1165 | struct ib_mad_agent *mad_agent; |
| 1166 | struct ib_sge *sge; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1167 | unsigned long flags; |
| 1168 | int ret; |
| 1169 | |
Hal Rosenstock | f8197a4 | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1170 | /* Set up the CQE so mad_send_wr can be found upon completion */ |
Hal Rosenstock | d760ce8 | 2005-07-27 11:45:25 -0700 | [diff] [blame] | 1171 | qp_info = mad_send_wr->mad_agent_priv->qp_info; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; |
Christoph Hellwig | d53e11f | 2016-01-05 22:46:12 -0800 | [diff] [blame] | 1173 | mad_send_wr->mad_list.cqe.done = ib_mad_send_done; |
| 1174 | mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1176 | mad_agent = mad_send_wr->send_buf.mad_agent; |
| 1177 | sge = mad_send_wr->sg_list; |
Ralph Campbell | 1527106 | 2006-12-12 14:28:30 -0800 | [diff] [blame] | 1178 | sge[0].addr = ib_dma_map_single(mad_agent->device, |
| 1179 | mad_send_wr->send_buf.mad, |
| 1180 | sge[0].length, |
| 1181 | DMA_TO_DEVICE); |
Yan Burman | 2c34e68 | 2014-03-11 14:41:47 +0200 | [diff] [blame] | 1182 | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) |
| 1183 | return -ENOMEM; |
| 1184 | |
Ralph Campbell | 1527106 | 2006-12-12 14:28:30 -0800 | [diff] [blame] | 1185 | mad_send_wr->header_mapping = sge[0].addr; |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1186 | |
Ralph Campbell | 1527106 | 2006-12-12 14:28:30 -0800 | [diff] [blame] | 1187 | sge[1].addr = ib_dma_map_single(mad_agent->device, |
| 1188 | ib_get_payload(mad_send_wr), |
| 1189 | sge[1].length, |
| 1190 | DMA_TO_DEVICE); |
Yan Burman | 2c34e68 | 2014-03-11 14:41:47 +0200 | [diff] [blame] | 1191 | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { |
| 1192 | ib_dma_unmap_single(mad_agent->device, |
| 1193 | mad_send_wr->header_mapping, |
| 1194 | sge[0].length, DMA_TO_DEVICE); |
| 1195 | return -ENOMEM; |
| 1196 | } |
Ralph Campbell | 1527106 | 2006-12-12 14:28:30 -0800 | [diff] [blame] | 1197 | mad_send_wr->payload_mapping = sge[1].addr; |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1198 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); |
Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 1200 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1201 | ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1202 | &bad_send_wr); |
Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 1203 | list = &qp_info->send_queue.list; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 | ret = 0; |
Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 1206 | list = &qp_info->overflow_list; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | } |
Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 1208 | |
| 1209 | if (!ret) { |
| 1210 | qp_info->send_queue.count++; |
| 1211 | list_add_tail(&mad_send_wr->mad_list.list, list); |
| 1212 | } |
| 1213 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1214 | if (ret) { |
Ralph Campbell | 1527106 | 2006-12-12 14:28:30 -0800 | [diff] [blame] | 1215 | ib_dma_unmap_single(mad_agent->device, |
| 1216 | mad_send_wr->header_mapping, |
| 1217 | sge[0].length, DMA_TO_DEVICE); |
| 1218 | ib_dma_unmap_single(mad_agent->device, |
| 1219 | mad_send_wr->payload_mapping, |
| 1220 | sge[1].length, DMA_TO_DEVICE); |
Jack Morgenstein | f36e179 | 2006-03-03 21:54:13 -0800 | [diff] [blame] | 1221 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | return ret; |
| 1223 | } |
| 1224 | |
| 1225 | /* |
| 1226 | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated |
| 1227 | * with the registered client |
| 1228 | */ |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1229 | int ib_post_send_mad(struct ib_mad_send_buf *send_buf, |
| 1230 | struct ib_mad_send_buf **bad_send_buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1232 | struct ib_mad_agent_private *mad_agent_priv; |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1233 | struct ib_mad_send_buf *next_send_buf; |
| 1234 | struct ib_mad_send_wr_private *mad_send_wr; |
| 1235 | unsigned long flags; |
| 1236 | int ret = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | |
| 1238 | /* Walk list of send WRs and post each on send list */ |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1239 | for (; send_buf; send_buf = next_send_buf) { |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1240 | mad_send_wr = container_of(send_buf, |
| 1241 | struct ib_mad_send_wr_private, |
| 1242 | send_buf); |
| 1243 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1244 | |
Daniel Jurgens | 47a2b33 | 2017-05-19 15:48:54 +0300 | [diff] [blame] | 1245 | ret = ib_mad_enforce_security(mad_agent_priv, |
| 1246 | mad_send_wr->send_wr.pkey_index); |
| 1247 | if (ret) |
| 1248 | goto error; |
| 1249 | |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1250 | if (!send_buf->mad_agent->send_handler || |
| 1251 | (send_buf->timeout_ms && |
| 1252 | !send_buf->mad_agent->recv_handler)) { |
| 1253 | ret = -EINVAL; |
| 1254 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1255 | } |
| 1256 | |
Hal Rosenstock | 618a3c0 | 2006-03-28 16:40:04 -0800 | [diff] [blame] | 1257 | if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { |
| 1258 | if (mad_agent_priv->agent.rmpp_version) { |
| 1259 | ret = -EINVAL; |
| 1260 | goto error; |
| 1261 | } |
| 1262 | } |
| 1263 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | /* |
| 1265 | * Save pointer to next work request to post in case the |
| 1266 | * current one completes, and the user modifies the work |
| 1267 | * request associated with the completion |
| 1268 | */ |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1269 | next_send_buf = send_buf->next; |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1270 | mad_send_wr->send_wr.ah = send_buf->ah; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1272 | if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == |
| 1273 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { |
| 1274 | ret = handle_outgoing_dr_smp(mad_agent_priv, |
| 1275 | mad_send_wr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | if (ret < 0) /* error */ |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1277 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | else if (ret == 1) /* locally consumed */ |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1279 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | } |
| 1281 | |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1282 | mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | /* Timeout will be updated after send completes */ |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1284 | mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); |
Sean Hefty | 4fc8cd4 | 2007-11-27 00:11:04 -0800 | [diff] [blame] | 1285 | mad_send_wr->max_retries = send_buf->retries; |
| 1286 | mad_send_wr->retries_left = send_buf->retries; |
| 1287 | send_buf->retries = 0; |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1288 | /* Reference for work request to QP + response */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); |
| 1290 | mad_send_wr->status = IB_WC_SUCCESS; |
| 1291 | |
| 1292 | /* Reference MAD agent until send completes */ |
| 1293 | atomic_inc(&mad_agent_priv->refcount); |
| 1294 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
| 1295 | list_add_tail(&mad_send_wr->agent_list, |
| 1296 | &mad_agent_priv->send_list); |
| 1297 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
| 1298 | |
Ira Weiny | 1471cb6 | 2014-08-08 19:00:56 -0400 | [diff] [blame] | 1299 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1300 | ret = ib_send_rmpp_mad(mad_send_wr); |
| 1301 | if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) |
| 1302 | ret = ib_send_mad(mad_send_wr); |
| 1303 | } else |
| 1304 | ret = ib_send_mad(mad_send_wr); |
| 1305 | if (ret < 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | /* Fail send request */ |
| 1307 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
| 1308 | list_del(&mad_send_wr->agent_list); |
| 1309 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
| 1310 | atomic_dec(&mad_agent_priv->refcount); |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1311 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | } |
| 1314 | return 0; |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1315 | error: |
| 1316 | if (bad_send_buf) |
| 1317 | *bad_send_buf = send_buf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | return ret; |
| 1319 | } |
| 1320 | EXPORT_SYMBOL(ib_post_send_mad); |
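
/*
 * Sketch, for illustration only (not part of the driver): posting one
 * send buffer and cleaning up on failure.  On success the MAD layer
 * owns the buffer until the client's send_handler runs; on failure
 * nothing was queued, so the caller must free it.
 */
static int example_post_one(struct ib_mad_send_buf *msg)
{
	int ret = ib_post_send_mad(msg, NULL);

	if (ret)
		ib_free_send_mad(msg);
	return ret;
}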
| 1321 | |
| 1322 | /* |
| 1323 | * ib_free_recv_mad - Returns data buffers used to receive |
| 1324 | * a MAD to the access layer |
| 1325 | */ |
| 1326 | void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) |
| 1327 | { |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1328 | struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | struct ib_mad_private_header *mad_priv_hdr; |
| 1330 | struct ib_mad_private *priv; |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1331 | struct list_head free_list; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1333 | INIT_LIST_HEAD(&free_list); |
| 1334 | list_splice_init(&mad_recv_wc->rmpp_list, &free_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1336 | list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, |
| 1337 | &free_list, list) { |
| 1338 | mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, |
| 1339 | recv_buf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1340 | mad_priv_hdr = container_of(mad_recv_wc, |
| 1341 | struct ib_mad_private_header, |
| 1342 | recv_wc); |
| 1343 | priv = container_of(mad_priv_hdr, struct ib_mad_private, |
| 1344 | header); |
Ira Weiny | c9082e5 | 2015-06-06 14:38:30 -0400 | [diff] [blame] | 1345 | kfree(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1346 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | } |
| 1348 | EXPORT_SYMBOL(ib_free_recv_mad); |
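
/*
 * Sketch, for illustration only (not part of the driver): the minimal
 * shape of a client recv_handler.  Every receive work completion handed
 * up must eventually be returned through ib_free_recv_mad().
 */
static void example_recv_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_send_buf *send_buf,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* ... inspect mad_recv_wc->recv_buf.mad here ... */
	ib_free_recv_mad(mad_recv_wc);
}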
| 1349 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, |
| 1351 | u8 rmpp_version, |
| 1352 | ib_mad_send_handler send_handler, |
| 1353 | ib_mad_recv_handler recv_handler, |
| 1354 | void *context) |
| 1355 | { |
| 1356 | return ERR_PTR(-EINVAL); /* XXX: for now */ |
| 1357 | } |
| 1358 | EXPORT_SYMBOL(ib_redirect_mad_qp); |
| 1359 | |
| 1360 | int ib_process_mad_wc(struct ib_mad_agent *mad_agent, |
| 1361 | struct ib_wc *wc) |
| 1362 | { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 1363 | dev_err(&mad_agent->device->dev, |
| 1364 | "ib_process_mad_wc() not implemented yet\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | return 0; |
| 1366 | } |
| 1367 | EXPORT_SYMBOL(ib_process_mad_wc); |
| 1368 | |
| 1369 | static int method_in_use(struct ib_mad_mgmt_method_table **method, |
| 1370 | struct ib_mad_reg_req *mad_reg_req) |
| 1371 | { |
| 1372 | int i; |
| 1373 | |
Akinobu Mita | 19b629f | 2010-03-05 13:41:38 -0800 | [diff] [blame] | 1374 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | if ((*method)->agent[i]) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 1376 | pr_err("Method %d already in use\n", i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | return -EINVAL; |
| 1378 | } |
| 1379 | } |
| 1380 | return 0; |
| 1381 | } |
| 1382 | |
| 1383 | static int allocate_method_table(struct ib_mad_mgmt_method_table **method) |
| 1384 | { |
| 1385 | /* Allocate management method table */ |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1386 | *method = kzalloc(sizeof **method, GFP_ATOMIC); |
Leon Romanovsky | 2716243 | 2016-11-03 16:44:09 +0200 | [diff] [blame] | 1387 | return (*method) ? 0 : (-ENOMEM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | } |
| 1389 | |
| 1390 | /* |
| 1391 | * Check to see if there are any methods still in use |
| 1392 | */ |
| 1393 | static int check_method_table(struct ib_mad_mgmt_method_table *method) |
| 1394 | { |
| 1395 | int i; |
| 1396 | |
| 1397 | for (i = 0; i < IB_MGMT_MAX_METHODS; i++) |
| 1398 | if (method->agent[i]) |
| 1399 | return 1; |
| 1400 | return 0; |
| 1401 | } |
| 1402 | |
| 1403 | /* |
| 1404 | * Check to see if there are any method tables for this class still in use |
| 1405 | */ |
| 1406 | static int check_class_table(struct ib_mad_mgmt_class_table *class) |
| 1407 | { |
| 1408 | int i; |
| 1409 | |
| 1410 | for (i = 0; i < MAX_MGMT_CLASS; i++) |
| 1411 | if (class->method_table[i]) |
| 1412 | return 1; |
| 1413 | return 0; |
| 1414 | } |
| 1415 | |
| 1416 | static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) |
| 1417 | { |
| 1418 | int i; |
| 1419 | |
| 1420 | for (i = 0; i < MAX_MGMT_OUI; i++) |
| 1421 | if (vendor_class->method_table[i]) |
| 1422 | return 1; |
| 1423 | return 0; |
| 1424 | } |
| 1425 | |
| 1426 | static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1427 | const char *oui) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | { |
| 1429 | int i; |
| 1430 | |
| 1431 | for (i = 0; i < MAX_MGMT_OUI; i++) |
Roland Dreier | 3cd9656 | 2006-09-22 15:22:46 -0700 | [diff] [blame] | 1432 | /* Is there a matching OUI for this vendor class? */ |
| 1433 | if (!memcmp(vendor_class->oui[i], oui, 3)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | return i; |
| 1435 | |
| 1436 | return -1; |
| 1437 | } |
| 1438 | |
| 1439 | static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) |
| 1440 | { |
| 1441 | int i; |
| 1442 | |
| 1443 | for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) |
| 1444 | if (vendor->vendor_class[i]) |
| 1445 | return 1; |
| 1446 | |
| 1447 | return 0; |
| 1448 | } |
| 1449 | |
| 1450 | static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, |
| 1451 | struct ib_mad_agent_private *agent) |
| 1452 | { |
| 1453 | int i; |
| 1454 | |
| 1455 | /* Remove any methods for this mad agent */ |
| 1456 | for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { |
| 1457 | if (method->agent[i] == agent) { |
| 1458 | method->agent[i] = NULL; |
| 1459 | } |
| 1460 | } |
| 1461 | } |
| 1462 | |
| 1463 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
| 1464 | struct ib_mad_agent_private *agent_priv, |
| 1465 | u8 mgmt_class) |
| 1466 | { |
| 1467 | struct ib_mad_port_private *port_priv; |
| 1468 | struct ib_mad_mgmt_class_table **class; |
| 1469 | struct ib_mad_mgmt_method_table **method; |
| 1470 | int i, ret; |
| 1471 | |
| 1472 | port_priv = agent_priv->qp_info->port_priv; |
| 1473 | class = &port_priv->version[mad_reg_req->mgmt_class_version].class; |
| 1474 | if (!*class) { |
| 1475 | /* Allocate management class table for "new" class version */ |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1476 | *class = kzalloc(sizeof **class, GFP_ATOMIC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1477 | if (!*class) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1478 | ret = -ENOMEM; |
| 1479 | goto error1; |
| 1480 | } |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1481 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | /* Allocate method table for this management class */ |
| 1483 | method = &(*class)->method_table[mgmt_class]; |
| 1484 | if ((ret = allocate_method_table(method))) |
| 1485 | goto error2; |
| 1486 | } else { |
| 1487 | method = &(*class)->method_table[mgmt_class]; |
| 1488 | if (!*method) { |
| 1489 | /* Allocate method table for this management class */ |
| 1490 | if ((ret = allocate_method_table(method))) |
| 1491 | goto error1; |
| 1492 | } |
| 1493 | } |
| 1494 | |
| 1495 | /* Now, make sure methods are not already in use */ |
| 1496 | if (method_in_use(method, mad_reg_req)) |
| 1497 | goto error3; |
| 1498 | |
| 1499 | /* Finally, add in methods being registered */ |
Akinobu Mita | 19b629f | 2010-03-05 13:41:38 -0800 | [diff] [blame] | 1500 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | (*method)->agent[i] = agent_priv; |
Akinobu Mita | 19b629f | 2010-03-05 13:41:38 -0800 | [diff] [blame] | 1502 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | return 0; |
| 1504 | |
| 1505 | error3: |
| 1506 | /* Remove any methods for this mad agent */ |
| 1507 | remove_methods_mad_agent(*method, agent_priv); |
| 1508 | /* Now, check to see if there are any methods in use */ |
| 1509 | if (!check_method_table(*method)) { |
| 1510 | /* If not, release management method table */ |
| 1511 | kfree(*method); |
| 1512 | *method = NULL; |
| 1513 | } |
| 1514 | ret = -EINVAL; |
| 1515 | goto error1; |
| 1516 | error2: |
| 1517 | kfree(*class); |
| 1518 | *class = NULL; |
| 1519 | error1: |
| 1520 | return ret; |
| 1521 | } |
| 1522 | |
| 1523 | static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
| 1524 | struct ib_mad_agent_private *agent_priv) |
| 1525 | { |
| 1526 | struct ib_mad_port_private *port_priv; |
| 1527 | struct ib_mad_mgmt_vendor_class_table **vendor_table; |
| 1528 | struct ib_mad_mgmt_vendor_class_table *vendor = NULL; |
| 1529 | struct ib_mad_mgmt_vendor_class *vendor_class = NULL; |
| 1530 | struct ib_mad_mgmt_method_table **method; |
| 1531 | int i, ret = -ENOMEM; |
| 1532 | u8 vclass; |
| 1533 | |
| 1534 | /* "New" vendor (with OUI) class */ |
| 1535 | vclass = vendor_class_index(mad_reg_req->mgmt_class); |
| 1536 | port_priv = agent_priv->qp_info->port_priv; |
| 1537 | vendor_table = &port_priv->version[ |
| 1538 | mad_reg_req->mgmt_class_version].vendor; |
| 1539 | if (!*vendor_table) { |
| 1540 | /* Allocate mgmt vendor class table for "new" class version */ |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1541 | vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); |
Leon Romanovsky | 2716243 | 2016-11-03 16:44:09 +0200 | [diff] [blame] | 1542 | if (!vendor) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | goto error1; |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1544 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | *vendor_table = vendor; |
| 1546 | } |
| 1547 | if (!(*vendor_table)->vendor_class[vclass]) { |
| 1548 | /* Allocate table for this management vendor class */ |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1549 | vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); |
Leon Romanovsky | 2716243 | 2016-11-03 16:44:09 +0200 | [diff] [blame] | 1550 | if (!vendor_class) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | goto error2; |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1552 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | (*vendor_table)->vendor_class[vclass] = vendor_class; |
| 1554 | } |
| 1555 | for (i = 0; i < MAX_MGMT_OUI; i++) { |
| 1556 | /* Is there a matching OUI for this vendor class? */ |
| 1557 | if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], |
| 1558 | mad_reg_req->oui, 3)) { |
| 1559 | method = &(*vendor_table)->vendor_class[ |
| 1560 | vclass]->method_table[i]; |
| 1561 | BUG_ON(!*method); |
| 1562 | goto check_in_use; |
| 1563 | } |
| 1564 | } |
| 1565 | for (i = 0; i < MAX_MGMT_OUI; i++) { |
| 1566 | /* OUI slot available? */ |
| 1567 | if (!is_vendor_oui((*vendor_table)->vendor_class[ |
| 1568 | vclass]->oui[i])) { |
| 1569 | method = &(*vendor_table)->vendor_class[ |
| 1570 | vclass]->method_table[i]; |
| 1571 | BUG_ON(*method); |
| 1572 | /* Allocate method table for this OUI */ |
| 1573 | if ((ret = allocate_method_table(method))) |
| 1574 | goto error3; |
| 1575 | memcpy((*vendor_table)->vendor_class[vclass]->oui[i], |
| 1576 | mad_reg_req->oui, 3); |
| 1577 | goto check_in_use; |
| 1578 | } |
| 1579 | } |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 1580 | dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | goto error3; |
| 1582 | |
| 1583 | check_in_use: |
| 1584 | /* Now, make sure methods are not already in use */ |
| 1585 | if (method_in_use(method, mad_reg_req)) |
| 1586 | goto error4; |
| 1587 | |
| 1588 | /* Finally, add in methods being registered */ |
Akinobu Mita | 19b629f | 2010-03-05 13:41:38 -0800 | [diff] [blame] | 1589 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | (*method)->agent[i] = agent_priv; |
Akinobu Mita | 19b629f | 2010-03-05 13:41:38 -0800 | [diff] [blame] | 1591 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | return 0; |
| 1593 | |
| 1594 | error4: |
| 1595 | /* Remove any methods for this mad agent */ |
| 1596 | remove_methods_mad_agent(*method, agent_priv); |
| 1597 | /* Now, check to see if there are any methods in use */ |
| 1598 | if (!check_method_table(*method)) { |
| 1599 | /* If not, release management method table */ |
| 1600 | kfree(*method); |
| 1601 | *method = NULL; |
| 1602 | } |
| 1603 | ret = -EINVAL; |
| 1604 | error3: |
| 1605 | if (vendor_class) { |
| 1606 | (*vendor_table)->vendor_class[vclass] = NULL; |
| 1607 | kfree(vendor_class); |
| 1608 | } |
| 1609 | error2: |
| 1610 | if (vendor) { |
| 1611 | *vendor_table = NULL; |
| 1612 | kfree(vendor); |
| 1613 | } |
| 1614 | error1: |
| 1615 | return ret; |
| 1616 | } |
| 1617 | |
| 1618 | static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) |
| 1619 | { |
| 1620 | struct ib_mad_port_private *port_priv; |
| 1621 | struct ib_mad_mgmt_class_table *class; |
| 1622 | struct ib_mad_mgmt_method_table *method; |
| 1623 | struct ib_mad_mgmt_vendor_class_table *vendor; |
| 1624 | struct ib_mad_mgmt_vendor_class *vendor_class; |
| 1625 | int index; |
| 1626 | u8 mgmt_class; |
| 1627 | |
| 1628 | /* |
| 1629 | * Was a MAD registration request supplied |
| 1630 | * with the original registration? |
| 1631 | */ |
| 1632 | if (!agent_priv->reg_req) { |
| 1633 | goto out; |
| 1634 | } |
| 1635 | |
| 1636 | port_priv = agent_priv->qp_info->port_priv; |
| 1637 | mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); |
| 1638 | class = port_priv->version[ |
| 1639 | agent_priv->reg_req->mgmt_class_version].class; |
| 1640 | if (!class) |
| 1641 | goto vendor_check; |
| 1642 | |
| 1643 | method = class->method_table[mgmt_class]; |
| 1644 | if (method) { |
| 1645 | /* Remove any methods for this mad agent */ |
| 1646 | remove_methods_mad_agent(method, agent_priv); |
| 1647 | /* Now, check to see if there are any methods still in use */ |
| 1648 | if (!check_method_table(method)) { |
| 1649 | /* If not, release management method table */ |
Bart Van Assche | 2190d10 | 2016-06-03 12:08:44 -0700 | [diff] [blame] | 1650 | kfree(method); |
| 1651 | class->method_table[mgmt_class] = NULL; |
| 1652 | /* Any management classes left? */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1653 | if (!check_class_table(class)) { |
| 1654 | /* If not, release management class table */ |
| 1655 | kfree(class); |
| 1656 | port_priv->version[ |
| 1657 | agent_priv->reg_req-> |
| 1658 | mgmt_class_version].class = NULL; |
| 1659 | } |
| 1660 | } |
| 1661 | } |
| 1662 | |
| 1663 | vendor_check: |
| 1664 | if (!is_vendor_class(mgmt_class)) |
| 1665 | goto out; |
| 1666 | |
| 1667 | /* normalize mgmt_class to vendor range 2 */ |
| 1668 | mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); |
| 1669 | vendor = port_priv->version[ |
| 1670 | agent_priv->reg_req->mgmt_class_version].vendor; |
| 1671 | |
| 1672 | if (!vendor) |
| 1673 | goto out; |
| 1674 | |
| 1675 | vendor_class = vendor->vendor_class[mgmt_class]; |
| 1676 | if (vendor_class) { |
| 1677 | index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); |
| 1678 | if (index < 0) |
| 1679 | goto out; |
| 1680 | method = vendor_class->method_table[index]; |
| 1681 | if (method) { |
| 1682 | /* Remove any methods for this mad agent */ |
| 1683 | remove_methods_mad_agent(method, agent_priv); |
| 1684 | /* |
| 1685 | * Now, check to see if there are |
| 1686 | * any methods still in use |
| 1687 | */ |
| 1688 | if (!check_method_table(method)) { |
| 1689 | /* If not, release management method table */ |
| 1690 | kfree(method); |
| 1691 | vendor_class->method_table[index] = NULL; |
| 1692 | memset(vendor_class->oui[index], 0, 3); |
| 1693 | /* Any OUIs left? */ |
| 1694 | if (!check_vendor_class(vendor_class)) { |
| 1695 | /* If not, release vendor class table */ |
| 1696 | kfree(vendor_class); |
| 1697 | vendor->vendor_class[mgmt_class] = NULL; |
| 1698 | /* Any other vendor classes left? */ |
| 1699 | if (!check_vendor_table(vendor)) { |
| 1700 | kfree(vendor); |
| 1701 | port_priv->version[ |
| 1702 | agent_priv->reg_req-> |
| 1703 | mgmt_class_version]. |
| 1704 | vendor = NULL; |
| 1705 | } |
| 1706 | } |
| 1707 | } |
| 1708 | } |
| 1709 | } |
| 1710 | |
| 1711 | out: |
| 1712 | return; |
| 1713 | } |
| 1714 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1715 | static struct ib_mad_agent_private * |
| 1716 | find_mad_agent(struct ib_mad_port_private *port_priv, |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1717 | const struct ib_mad_hdr *mad_hdr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1718 | { |
| 1719 | struct ib_mad_agent_private *mad_agent = NULL; |
| 1720 | unsigned long flags; |
| 1721 | |
| 1722 | spin_lock_irqsave(&port_priv->reg_lock, flags); |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1723 | if (ib_response_mad(mad_hdr)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | u32 hi_tid; |
| 1725 | struct ib_mad_agent_private *entry; |
| 1726 | |
| 1727 | /* |
| 1728 | * Routing is based on high 32 bits of transaction ID |
| 1729 | * of MAD. |
| 1730 | */ |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1731 | hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; |
Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1732 | list_for_each_entry(entry, &port_priv->agent_list, agent_list) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | if (entry->agent.hi_tid == hi_tid) { |
| 1734 | mad_agent = entry; |
| 1735 | break; |
| 1736 | } |
| 1737 | } |
| 1738 | } else { |
| 1739 | struct ib_mad_mgmt_class_table *class; |
| 1740 | struct ib_mad_mgmt_method_table *method; |
| 1741 | struct ib_mad_mgmt_vendor_class_table *vendor; |
| 1742 | struct ib_mad_mgmt_vendor_class *vendor_class; |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1743 | const struct ib_vendor_mad *vendor_mad; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 | int index; |
| 1745 | |
| 1746 | /* |
| 1747 | * Routing is based on version, class, and method |
| 1748 | * For "newer" vendor MADs, also based on OUI |
| 1749 | */ |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1750 | if (mad_hdr->class_version >= MAX_MGMT_VERSION) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1751 | goto out; |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1752 | if (!is_vendor_class(mad_hdr->mgmt_class)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 | class = port_priv->version[ |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1754 | mad_hdr->class_version].class; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | if (!class) |
| 1756 | goto out; |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1757 | if (convert_mgmt_class(mad_hdr->mgmt_class) >= |
Bart Van Assche | 2fe2f37 | 2016-11-21 10:21:17 -0800 | [diff] [blame] | 1758 | ARRAY_SIZE(class->method_table)) |
Hefty, Sean | b7ab0b1 | 2011-10-06 09:33:05 -0700 | [diff] [blame] | 1759 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 | method = class->method_table[convert_mgmt_class( |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1761 | mad_hdr->mgmt_class)]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | if (method) |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1763 | mad_agent = method->agent[mad_hdr->method & |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | ~IB_MGMT_METHOD_RESP]; |
| 1765 | } else { |
| 1766 | vendor = port_priv->version[ |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1767 | mad_hdr->class_version].vendor; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | if (!vendor) |
| 1769 | goto out; |
| 1770 | vendor_class = vendor->vendor_class[vendor_class_index( |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1771 | mad_hdr->mgmt_class)]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | if (!vendor_class) |
| 1773 | goto out; |
| 1774 | /* Find matching OUI */ |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1775 | vendor_mad = (const struct ib_vendor_mad *)mad_hdr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | index = find_vendor_oui(vendor_class, vendor_mad->oui); |
| 1777 | if (index == -1) |
| 1778 | goto out; |
| 1779 | method = vendor_class->method_table[index]; |
| 1780 | if (method) { |
Ira Weiny | d94bd26 | 2015-06-06 14:38:22 -0400 | [diff] [blame] | 1781 | mad_agent = method->agent[mad_hdr->method & |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 | ~IB_MGMT_METHOD_RESP]; |
| 1783 | } |
| 1784 | } |
| 1785 | } |
| 1786 | |
| 1787 | if (mad_agent) { |
| 1788 | if (mad_agent->agent.recv_handler) |
| 1789 | atomic_inc(&mad_agent->refcount); |
| 1790 | else { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 1791 | dev_notice(&port_priv->device->dev, |
| 1792 | "No receive handler for client %p on port %d\n", |
| 1793 | &mad_agent->agent, port_priv->port_num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1794 | mad_agent = NULL; |
| 1795 | } |
| 1796 | } |
| 1797 | out: |
| 1798 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); |
| 1799 | |
| 1800 | return mad_agent; |
| 1801 | } |
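
/*
 * Sketch, for illustration only (not part of the driver): how a client
 * forms a TID so that the response routing above can locate its agent
 * again.  The high 32 bits must be the agent's hi_tid; the low 32 bits
 * ("user_id" here, a hypothetical name) are free for the client's own
 * bookkeeping.
 */
static __be64 example_build_tid(const struct ib_mad_agent *agent, u32 user_id)
{
	return cpu_to_be64(((u64)agent->hi_tid << 32) | user_id);
}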
| 1802 | |
Ira Weiny | 8e4349d | 2015-06-10 16:16:48 -0400 | [diff] [blame] | 1803 | static int validate_mad(const struct ib_mad_hdr *mad_hdr, |
| 1804 | const struct ib_mad_qp_info *qp_info, |
| 1805 | bool opa) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1806 | { |
| 1807 | int valid = 0; |
Ira Weiny | 8e4349d | 2015-06-10 16:16:48 -0400 | [diff] [blame] | 1808 | u32 qp_num = qp_info->qp->qp_num; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1809 | |
| 1810 | /* Make sure MAD base version is understood */ |
Ira Weiny | 8e4349d | 2015-06-10 16:16:48 -0400 | [diff] [blame] | 1811 | if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && |
| 1812 | (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { |
| 1813 | pr_err("MAD received with unsupported base version %d %s\n", |
| 1814 | mad_hdr->base_version, opa ? "(opa)" : ""); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 | goto out; |
| 1816 | } |
| 1817 | |
| 1818 | /* Filter SMI packets sent to other than QP0 */ |
Ira Weiny | 77f6083 | 2015-05-08 14:27:21 -0400 | [diff] [blame] | 1819 | if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || |
| 1820 | (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1821 | if (qp_num == 0) |
| 1822 | valid = 1; |
| 1823 | } else { |
Hal Rosenstock | 5337088 | 2015-11-13 15:22:22 -0500 | [diff] [blame] | 1824 | /* CM attributes other than ClassPortInfo only use Send method */ |
| 1825 | if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && |
| 1826 | (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && |
| 1827 | (mad_hdr->method != IB_MGMT_METHOD_SEND)) |
| 1828 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | /* Filter GSI packets sent to QP0 */ |
| 1830 | if (qp_num != 0) |
| 1831 | valid = 1; |
| 1832 | } |
| 1833 | |
| 1834 | out: |
| 1835 | return valid; |
| 1836 | } |
| 1837 | |
Ira Weiny | f766c58 | 2015-05-08 14:27:24 -0400 | [diff] [blame] | 1838 | static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, |
| 1839 | const struct ib_mad_hdr *mad_hdr) |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1840 | { |
| 1841 | struct ib_rmpp_mad *rmpp_mad; |
| 1842 | |
| 1843 | rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; |
| 1844 | return !mad_agent_priv->agent.rmpp_version || |
Ira Weiny | 1471cb6 | 2014-08-08 19:00:56 -0400 | [diff] [blame] | 1845 | !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1846 | !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
| 1847 | IB_MGMT_RMPP_FLAG_ACTIVE) || |
| 1848 | (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); |
| 1849 | } |
| 1850 | |
Ira Weiny | 8bf4b30 | 2015-05-08 14:27:23 -0400 | [diff] [blame] | 1851 | static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, |
| 1852 | const struct ib_mad_recv_wc *rwc) |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1853 | { |
Ira Weiny | 8bf4b30 | 2015-05-08 14:27:23 -0400 | [diff] [blame] | 1854 | return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1855 | rwc->recv_buf.mad->mad_hdr.mgmt_class; |
| 1856 | } |
| 1857 | |
Ira Weiny | f766c58 | 2015-05-08 14:27:24 -0400 | [diff] [blame] | 1858 | static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, |
| 1859 | const struct ib_mad_send_wr_private *wr, |
| 1860 | const struct ib_mad_recv_wc *rwc) |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1861 | { |
Dasaratharaman Chandramouli | 9089885 | 2017-04-29 14:41:18 -0400 | [diff] [blame] | 1862 | struct rdma_ah_attr attr; |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1863 | u8 send_resp, rcv_resp; |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1864 | union ib_gid sgid; |
| 1865 | struct ib_device *device = mad_agent_priv->agent.device; |
| 1866 | u8 port_num = mad_agent_priv->agent.port_num; |
| 1867 | u8 lmc; |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1868 | bool has_grh; |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1869 | |
Ira Weiny | 9690930 | 2015-05-08 14:27:22 -0400 | [diff] [blame] | 1870 | send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); |
| 1871 | rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1872 | |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1873 | if (send_resp == rcv_resp) |
| 1874 | /* both requests or both responses: not a pair, treat GIDs as different */ |
| 1875 | return 0; |
| 1876 | |
Dasaratharaman Chandramouli | bfbfd66 | 2017-04-29 14:41:21 -0400 | [diff] [blame] | 1877 | if (rdma_query_ah(wr->send_buf.ah, &attr)) |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1878 | /* Assume not equal, to avoid false positives. */ |
| 1879 | return 0; |
| 1880 | |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1881 | has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH); |
| 1882 | if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1883 | /* one has GID, other does not. Assume different */ |
| 1884 | return 0; |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1885 | |
| 1886 | if (!send_resp && rcv_resp) { |
| 1887 | /* we sent a request and received a response: match on the return path */ |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1888 | if (!has_grh) { |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1889 | if (ib_get_cached_lmc(device, port_num, &lmc)) |
| 1890 | return 0; |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1891 | return (!lmc || !((rdma_ah_get_path_bits(&attr) ^ |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1892 | rwc->wc->dlid_path_bits) & |
| 1893 | ((1 << lmc) - 1))); |
| 1894 | } else { |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1895 | const struct ib_global_route *grh = |
| 1896 | rdma_ah_read_grh(&attr); |
| 1897 | |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1898 | if (ib_get_cached_gid(device, port_num, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1899 | grh->sgid_index, &sgid, NULL)) |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1900 | return 0; |
| 1901 | return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, |
| 1902 | 16); |
| 1903 | } |
| 1904 | } |
| 1905 | |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1906 | if (!has_grh) |
| 1907 | return rdma_ah_get_dlid(&attr) == rwc->wc->slid; |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1908 | else |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1909 | return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, |
| 1910 | rwc->recv_buf.grh->sgid.raw, |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1911 | 16); |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1912 | } |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1913 | |
| 1914 | static inline int is_direct(u8 class) |
| 1915 | { |
| 1916 | return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); |
| 1917 | } |
| 1918 | |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1919 | struct ib_mad_send_wr_private * |
Ira Weiny | f766c58 | 2015-05-08 14:27:24 -0400 | [diff] [blame] | 1920 | ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, |
| 1921 | const struct ib_mad_recv_wc *wc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1922 | { |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1923 | struct ib_mad_send_wr_private *wr; |
Ira Weiny | 83a1d22 | 2015-06-06 14:38:23 -0400 | [diff] [blame] | 1924 | const struct ib_mad_hdr *mad_hdr; |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1925 | |
Ira Weiny | 83a1d22 | 2015-06-06 14:38:23 -0400 | [diff] [blame] | 1926 | mad_hdr = &wc->recv_buf.mad->mad_hdr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1927 | |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1928 | list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { |
Ira Weiny | 83a1d22 | 2015-06-06 14:38:23 -0400 | [diff] [blame] | 1929 | if ((wr->tid == mad_hdr->tid) && |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1930 | rcv_has_same_class(wr, wc) && |
| 1931 | /* |
| 1932 | * Don't check GID for direct routed MADs. |
| 1933 | * These might have permissive LIDs. |
| 1934 | */ |
Ira Weiny | 83a1d22 | 2015-06-06 14:38:23 -0400 | [diff] [blame] | 1935 | (is_direct(mad_hdr->mgmt_class) || |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1936 | rcv_has_same_gid(mad_agent_priv, wr, wc))) |
Roland Dreier | 3979869 | 2006-11-13 09:38:07 -0800 | [diff] [blame] | 1937 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1938 | } |
| 1939 | |
| 1940 | /* |
| 1941 | * It's possible to receive the response before we've |
| 1942 | * been notified that the send has completed |
| 1943 | */ |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1944 | list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { |
Ira Weiny | c597eee | 2015-05-08 13:10:03 -0400 | [diff] [blame] | 1945 | if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && |
Ira Weiny | 83a1d22 | 2015-06-06 14:38:23 -0400 | [diff] [blame] | 1946 | wr->tid == mad_hdr->tid && |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1947 | wr->timeout && |
| 1948 | rcv_has_same_class(wr, wc) && |
| 1949 | /* |
| 1950 | * Don't check GID for direct routed MADs. |
| 1951 | * These might have permissive LIDs. |
| 1952 | */ |
Ira Weiny | 83a1d22 | 2015-06-06 14:38:23 -0400 | [diff] [blame] | 1953 | (is_direct(mad_hdr->mgmt_class) || |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1954 | rcv_has_same_gid(mad_agent_priv, wr, wc))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 | /* Verify request has not been canceled */ |
Jack Morgenstein | 9874e74 | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1956 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1957 | } |
| 1958 | return NULL; |
| 1959 | } |
| 1960 | |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1961 | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) |
Hal Rosenstock | 6a0c435 | 2005-07-27 11:45:26 -0700 | [diff] [blame] | 1962 | { |
| 1963 | mad_send_wr->timeout = 0; |
Akinobu Mita | 179e091 | 2006-06-26 00:24:41 -0700 | [diff] [blame] | 1964 | if (mad_send_wr->refcount == 1) |
| 1965 | list_move_tail(&mad_send_wr->agent_list, |
Hal Rosenstock | 6a0c435 | 2005-07-27 11:45:26 -0700 | [diff] [blame] | 1966 | &mad_send_wr->mad_agent_priv->done_list); |
Hal Rosenstock | 6a0c435 | 2005-07-27 11:45:26 -0700 | [diff] [blame] | 1967 | } |
| 1968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, |
Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1970 | struct ib_mad_recv_wc *mad_recv_wc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1971 | { |
| 1972 | struct ib_mad_send_wr_private *mad_send_wr; |
| 1973 | struct ib_mad_send_wc mad_send_wc; |
| 1974 | unsigned long flags; |
Daniel Jurgens | 47a2b33 | 2017-05-19 15:48:54 +0300 | [diff] [blame] | 1975 | int ret; |
| 1976 | |
| 1977 | ret = ib_mad_enforce_security(mad_agent_priv, |
| 1978 | mad_recv_wc->wc->pkey_index); |
| 1979 | if (ret) { |
| 1980 | ib_free_recv_mad(mad_recv_wc); |
| 1981 | deref_mad_agent(mad_agent_priv); |
| | /* mad_recv_wc was just freed; bail out instead of using it below */ |
| | return; |
| 1982 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 | |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1984 | INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); |
| 1985 | list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); |
Ira Weiny | 1471cb6 | 2014-08-08 19:00:56 -0400 | [diff] [blame] | 1986 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1987 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, |
| 1988 | mad_recv_wc); |
| 1989 | if (!mad_recv_wc) { |
Sean Hefty | 1b52fa98 | 2006-05-12 14:57:52 -0700 | [diff] [blame] | 1990 | deref_mad_agent(mad_agent_priv); |
Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1991 | return; |
| 1992 | } |
| 1993 | } |
| 1994 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1995 | /* Complete corresponding request */ |
Ira Weiny | 9690930 | 2015-05-08 14:27:22 -0400 | [diff] [blame] | 1996 | if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
Jack Morgenstein | fa9656b | 2006-03-28 16:39:07 -0800 | [diff] [blame] | 1998 | mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | if (!mad_send_wr) { |
| 2000 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
Ira Weiny | 1471cb6 | 2014-08-08 19:00:56 -0400 | [diff] [blame] | 2001 | if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) |
| 2002 | && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) |
| 2003 | && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) |
| 2004 | & IB_MGMT_RMPP_FLAG_ACTIVE)) { |
| 2005 | /* user rmpp is in effect |
| 2006 | * and this is an active RMPP MAD |
| 2007 | */ |
Christoph Hellwig | ca28126 | 2016-01-04 14:15:58 +0100 | [diff] [blame] | 2008 | mad_agent_priv->agent.recv_handler( |
| 2009 | &mad_agent_priv->agent, NULL, |
| 2010 | mad_recv_wc); |
Ira Weiny | 1471cb6 | 2014-08-08 19:00:56 -0400 | [diff] [blame] | 2011 | atomic_dec(&mad_agent_priv->refcount); |
| 2012 | } else { |
| 2013 | /* not user rmpp, revert to normal behavior and |
| 2014 | * drop the mad */ |
| 2015 | ib_free_recv_mad(mad_recv_wc); |
| 2016 | deref_mad_agent(mad_agent_priv); |
| 2017 | return; |
| 2018 | } |
| 2019 | } else { |
| 2020 | ib_mark_mad_done(mad_send_wr); |
| 2021 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
| 2022 | |
| 2023 | /* Defined behavior is to complete response before request */ |
Christoph Hellwig | ca28126 | 2016-01-04 14:15:58 +0100 | [diff] [blame] | 2024 | mad_agent_priv->agent.recv_handler( |
| 2025 | &mad_agent_priv->agent, |
| 2026 | &mad_send_wr->send_buf, |
| 2027 | mad_recv_wc); |
Ira Weiny | 1471cb6 | 2014-08-08 19:00:56 -0400 | [diff] [blame] | 2028 | atomic_dec(&mad_agent_priv->refcount); |
| 2029 | |
| 2030 | mad_send_wc.status = IB_WC_SUCCESS; |
| 2031 | mad_send_wc.vendor_err = 0; |
| 2032 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
| 2033 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2034 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 | } else { |
Christoph Hellwig | ca28126 | 2016-01-04 14:15:58 +0100 | [diff] [blame] | 2036 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, |
Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 2037 | mad_recv_wc); |
Sean Hefty | 1b52fa98 | 2006-05-12 14:57:52 -0700 | [diff] [blame] | 2038 | deref_mad_agent(mad_agent_priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | } |
Daniel Jurgens | 47a2b33 | 2017-05-19 15:48:54 +0300 | [diff] [blame] | 2040 | |
| 2041 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | } |
| 2043 | |
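/*
 * Apply the directed-route SMP rules to an incoming IB SMP: discard it
 * (bad hop pointer/count or failed local checks), handle it locally, or -
 * on a switch - relay it out of the forwarding port via
 * agent_send_response() and discard the original.
 */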
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
	    IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

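/*
 * Build a reply for a Get/Set request that no agent claimed: clone the
 * request, flip the method to GetResp, and set the "unsupported
 * method/attribute" status (plus the direction bit for directed-route
 * SMPs).  Returns false for any other method, in which case the MAD is
 * dropped without a reply.
 */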
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}

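/* OPA analogue of handle_ib_smi(), operating on the larger OPA SMP format. */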
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				       rdma_cap_ib_switch(port_priv->device),
				       port_num,
				       port_priv->device->phys_port_cnt) ==
	    IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       rdma_cap_ib_switch(port_priv->device),
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

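/*
 * Receive completion handler: unmap the buffer, validate the MAD, let the
 * SMI code route directed-route SMPs, give the driver "right of first
 * refusal" via process_mad(), then hand the MAD to the matching agent or
 * answer unmatched Get/Set requests.  A buffer is always reposted so the
 * receive queue stays full.
 */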
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed inside ib_mad_complete_recv(), either on
		 * its error paths or by the agent's recv_handler
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

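/*
 * Re-arm the agent's delayed work to fire at the earliest pending send
 * timeout, or cancel it when nothing is waiting.  Only the head of the
 * wait list matters because the list is kept sorted by timeout.
 */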
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

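/*
 * Queue a request on the wait list until its response arrives or it times
 * out.  The list is kept sorted by absolute timeout so that
 * adjust_timeout() and timeout_sends() only ever need to examine the head.
 */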
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						      struct ib_mad_send_wr_private,
						      agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else {
		list_item = &mad_agent_priv->wait_list;
	}
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

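/*
 * Send completion handler: unmap the header and payload buffers, retire
 * the work request, and if sends had overflowed the hardware queue, post
 * the next queued send.
 */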
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

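/*
 * Returns true if the failed send should still be completed to its owner,
 * false if it was consumed here (a flushed send that was successfully
 * reposted).
 */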
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

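/*
 * Sketch of typical client usage (the "agent" and "send_buf" variables
 * are illustrative, not from this file): a client that no longer wants
 * to wait for a response can shorten the timeout or cancel outright:
 *
 *	ib_modify_mad(agent, send_buf, 100);	(wait at most 100 ms more)
 *	ib_cancel_mad(agent, send_buf);		(give up immediately)
 *
 * Either way the send still completes to the agent's send_handler, with
 * status IB_WC_WR_FLUSH_ERR when cancelled.
 */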
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

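/*
 * Work handler for MADs that were routed back to an agent on the same
 * port instead of going out on the wire.  Deliver the MAD to the
 * receiving agent first (responses complete before requests), then
 * complete the send back to the originator.
 */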
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
				(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

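/*
 * Retry a timed-out request once more.  Returns 0 if a retry was posted,
 * -ETIMEDOUT when the retry budget is exhausted, or a send error.
 */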
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

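/*
 * Delayed-work handler: walk the sorted wait list, retrying or timing out
 * every request whose deadline has passed, then re-arm the work item for
 * the next pending deadline.
 */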
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			/* don't leak the buffer on a failed mapping */
			kfree(mad_priv);
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {
		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

| 2972 | /* |
| 2973 | * Start the port |
| 2974 | */ |
| 2975 | static int ib_mad_port_start(struct ib_mad_port_private *port_priv) |
| 2976 | { |
| 2977 | int ret, i; |
| 2978 | struct ib_qp_attr *attr; |
| 2979 | struct ib_qp *qp; |
Jack Morgenstein | ef5ed41 | 2013-07-18 14:02:29 +0300 | [diff] [blame] | 2980 | u16 pkey_index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2981 | |
| 2982 | attr = kmalloc(sizeof *attr, GFP_KERNEL); |
Leon Romanovsky | 2716243 | 2016-11-03 16:44:09 +0200 | [diff] [blame] | 2983 | if (!attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2984 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2985 | |
Jack Morgenstein | ef5ed41 | 2013-07-18 14:02:29 +0300 | [diff] [blame] | 2986 | ret = ib_find_pkey(port_priv->device, port_priv->port_num, |
| 2987 | IB_DEFAULT_PKEY_FULL, &pkey_index); |
| 2988 | if (ret) |
| 2989 | pkey_index = 0; |
| 2990 | |
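| | /*
| |  * Bring each MAD QP from Reset through Init and RTR to RTS so it
| |  * can post sends and accept receive completions
| |  */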
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2991 | for (i = 0; i < IB_MAD_QPS_CORE; i++) { |
| 2992 | qp = port_priv->qp_info[i].qp; |
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 2993 | if (!qp) |
| 2994 | continue; |
| 2995 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 | /* |
| 2997 | * PKey index for QP1 is irrelevant but |
| 2998 | * one is needed for the Reset to Init transition |
| 2999 | */ |
| 3000 | attr->qp_state = IB_QPS_INIT; |
Jack Morgenstein | ef5ed41 | 2013-07-18 14:02:29 +0300 | [diff] [blame] | 3001 | attr->pkey_index = pkey_index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3002 | attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; |
| 3003 | ret = ib_modify_qp(qp, attr, IB_QP_STATE | |
| 3004 | IB_QP_PKEY_INDEX | IB_QP_QKEY); |
| 3005 | if (ret) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3006 | dev_err(&port_priv->device->dev, |
| 3007 | "Couldn't change QP%d state to INIT: %d\n", |
| 3008 | i, ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3009 | goto out; |
| 3010 | } |
| 3011 | |
| 3012 | attr->qp_state = IB_QPS_RTR; |
| 3013 | ret = ib_modify_qp(qp, attr, IB_QP_STATE); |
| 3014 | if (ret) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3015 | dev_err(&port_priv->device->dev, |
| 3016 | "Couldn't change QP%d state to RTR: %d\n", |
| 3017 | i, ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3018 | goto out; |
| 3019 | } |
| 3020 | |
| 3021 | attr->qp_state = IB_QPS_RTS; |
| 3022 | attr->sq_psn = IB_MAD_SEND_Q_PSN; |
| 3023 | ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); |
| 3024 | if (ret) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3025 | dev_err(&port_priv->device->dev, |
| 3026 | "Couldn't change QP%d state to RTS: %d\n", |
| 3027 | i, ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3028 | goto out; |
| 3029 | } |
| 3030 | } |
| 3031 | |
| 3032 | ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); |
| 3033 | if (ret) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3034 | dev_err(&port_priv->device->dev, |
| 3035 | "Failed to request completion notification: %d\n", |
| 3036 | ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3037 | goto out; |
| 3038 | } |
| 3039 | |
| 3040 | for (i = 0; i < IB_MAD_QPS_CORE; i++) { |
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 3041 | if (!port_priv->qp_info[i].qp) |
| 3042 | continue; |
| 3043 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3044 | ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); |
| 3045 | if (ret) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3046 | dev_err(&port_priv->device->dev, |
| 3047 | "Couldn't post receive WRs\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3048 | goto out; |
| 3049 | } |
| 3050 | } |
| 3051 | out: |
| 3052 | kfree(attr); |
| 3053 | return ret; |
| 3054 | } |
| 3055 | |
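| | /*
| |  * Asynchronous event handler for the MAD QPs; a fatal QP error
| |  * cannot be recovered here, so just log it
| |  */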
| 3056 | static void qp_event_handler(struct ib_event *event, void *qp_context) |
| 3057 | { |
| 3058 | struct ib_mad_qp_info *qp_info = qp_context; |
| 3059 | |
| 3060 | /* It's worse than that! He's dead, Jim! */ |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3061 | dev_err(&qp_info->port_priv->device->dev, |
| 3062 | "Fatal error (%d) on MAD QP (%d)\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3063 | event->event, qp_info->qp->qp_num); |
| 3064 | } |
| 3065 | |
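| | /* Initialize an empty MAD send or receive queue */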
| 3066 | static void init_mad_queue(struct ib_mad_qp_info *qp_info, |
| 3067 | struct ib_mad_queue *mad_queue) |
| 3068 | { |
| 3069 | mad_queue->qp_info = qp_info; |
| 3070 | mad_queue->count = 0; |
| 3071 | spin_lock_init(&mad_queue->lock); |
| 3072 | INIT_LIST_HEAD(&mad_queue->list); |
| 3073 | } |
| 3074 | |
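| | /* Set up the send/receive queues and snoop state for one MAD QP */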
| 3075 | static void init_mad_qp(struct ib_mad_port_private *port_priv, |
| 3076 | struct ib_mad_qp_info *qp_info) |
| 3077 | { |
| 3078 | qp_info->port_priv = port_priv; |
| 3079 | init_mad_queue(qp_info, &qp_info->send_queue); |
| 3080 | init_mad_queue(qp_info, &qp_info->recv_queue); |
| 3081 | INIT_LIST_HEAD(&qp_info->overflow_list); |
| 3082 | spin_lock_init(&qp_info->snoop_lock); |
| 3083 | qp_info->snoop_table = NULL; |
| 3084 | qp_info->snoop_table_size = 0; |
| 3085 | atomic_set(&qp_info->snoop_count, 0); |
| 3086 | } |
| 3087 | |
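| | /* Create the SMI (QP0) or GSI (QP1) special QP on the port's shared CQ */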
| 3088 | static int create_mad_qp(struct ib_mad_qp_info *qp_info, |
| 3089 | enum ib_qp_type qp_type) |
| 3090 | { |
| 3091 | struct ib_qp_init_attr qp_init_attr; |
| 3092 | int ret; |
| 3093 | |
| 3094 | memset(&qp_init_attr, 0, sizeof qp_init_attr); |
| 3095 | qp_init_attr.send_cq = qp_info->port_priv->cq; |
| 3096 | qp_init_attr.recv_cq = qp_info->port_priv->cq; |
| 3097 | qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; |
Hal Rosenstock | b76aabc | 2009-09-07 08:28:48 -0700 | [diff] [blame] | 3098 | qp_init_attr.cap.max_send_wr = mad_sendq_size; |
| 3099 | qp_init_attr.cap.max_recv_wr = mad_recvq_size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3100 | qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; |
| 3101 | qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; |
| 3102 | qp_init_attr.qp_type = qp_type; |
| 3103 | qp_init_attr.port_num = qp_info->port_priv->port_num; |
| 3104 | qp_init_attr.qp_context = qp_info; |
| 3105 | qp_init_attr.event_handler = qp_event_handler; |
| 3106 | qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); |
| 3107 | if (IS_ERR(qp_info->qp)) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3108 | dev_err(&qp_info->port_priv->device->dev, |
| 3109 | "Couldn't create ib_mad QP%d\n", |
| 3110 | get_spl_qp_index(qp_type)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3111 | ret = PTR_ERR(qp_info->qp); |
| 3112 | goto error; |
| 3113 | } |
| 3114 | /* Use minimum queue sizes unless the CQ is resized */ |
Hal Rosenstock | b76aabc | 2009-09-07 08:28:48 -0700 | [diff] [blame] | 3115 | qp_info->send_queue.max_active = mad_sendq_size; |
| 3116 | qp_info->recv_queue.max_active = mad_recvq_size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3117 | return 0; |
| 3118 | |
| 3119 | error: |
| 3120 | return ret; |
| 3121 | } |
| 3122 | |
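| | /* Destroy a MAD QP, if it was created, and free its snoop table */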
| 3123 | static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) |
| 3124 | { |
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 3125 | if (!qp_info->qp) |
| 3126 | return; |
| 3127 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3128 | ib_destroy_qp(qp_info->qp); |
Jesper Juhl | 6044ec8 | 2005-11-07 01:01:32 -0800 | [diff] [blame] | 3129 | kfree(qp_info->snoop_table); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3130 | } |
| 3131 | |
| 3132 | /* |
| 3133 | * Open the port |
| 3134 |  * Create the CQ, PD, QPs, and workqueue if needed
| 3135 | */ |
| 3136 | static int ib_mad_port_open(struct ib_device *device, |
| 3137 | int port_num) |
| 3138 | { |
| 3139 | int ret, cq_size; |
| 3140 | struct ib_mad_port_private *port_priv; |
| 3141 | unsigned long flags; |
| 3142 | char name[sizeof "ib_mad123"]; |
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 3143 | int has_smi; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3144 | |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 3145 | if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) |
| 3146 | return -EFAULT; |
| 3147 | |
Ira Weiny | 548ead1 | 2015-06-06 14:38:33 -0400 | [diff] [blame] | 3148 | if (WARN_ON(rdma_cap_opa_mad(device, port_num) && |
| 3149 | rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) |
| 3150 | return -EFAULT; |
| 3151 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3152 | /* Create new device info */ |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 3153 | port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); |
Leon Romanovsky | 2716243 | 2016-11-03 16:44:09 +0200 | [diff] [blame] | 3154 | if (!port_priv) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3155 | return -ENOMEM; |
Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 3156 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3157 | port_priv->device = device; |
| 3158 | port_priv->port_num = port_num; |
| 3159 | spin_lock_init(&port_priv->reg_lock); |
| 3160 | INIT_LIST_HEAD(&port_priv->agent_list); |
| 3161 | init_mad_qp(port_priv, &port_priv->qp_info[0]); |
| 3162 | init_mad_qp(port_priv, &port_priv->qp_info[1]); |
| 3163 | |
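| | /* Size the shared CQ for QP1, and for QP0 as well when the port has an SMI */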
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 3164 | cq_size = mad_sendq_size + mad_recvq_size; |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3165 | has_smi = rdma_cap_ib_smi(device, port_num); |
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 3166 | if (has_smi) |
| 3167 | cq_size *= 2; |
| 3168 | |
Christoph Hellwig | d53e11f | 2016-01-05 22:46:12 -0800 | [diff] [blame] | 3169 | port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, |
| 3170 | IB_POLL_WORKQUEUE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3171 | if (IS_ERR(port_priv->cq)) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3172 | dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3173 | ret = PTR_ERR(port_priv->cq); |
| 3174 | goto error3; |
| 3175 | } |
| 3176 | |
Christoph Hellwig | ed082d3 | 2016-09-05 12:56:17 +0200 | [diff] [blame] | 3177 | port_priv->pd = ib_alloc_pd(device, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3178 | if (IS_ERR(port_priv->pd)) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3179 | dev_err(&device->dev, "Couldn't create ib_mad PD\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3180 | ret = PTR_ERR(port_priv->pd); |
| 3181 | goto error4; |
| 3182 | } |
| 3183 | |
Eli Cohen | fac70d5 | 2010-09-27 17:51:11 -0700 | [diff] [blame] | 3184 | if (has_smi) { |
| 3185 | ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); |
| 3186 | if (ret) |
| 3187 | goto error6; |
| 3188 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3189 | ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); |
| 3190 | if (ret) |
| 3191 | goto error7; |
| 3192 | |
| 3193 | snprintf(name, sizeof name, "ib_mad%d", port_num); |
Bhaktipriya Shridhar | 1c99e29 | 2016-08-15 23:28:07 +0530 | [diff] [blame] | 3194 | port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3195 | if (!port_priv->wq) { |
| 3196 | ret = -ENOMEM; |
| 3197 | goto error8; |
| 3198 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3199 | |
Michael S. Tsirkin | dc05980 | 2006-03-20 10:08:25 -0800 | [diff] [blame] | 3200 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
| 3201 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); |
| 3202 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); |
| 3203 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3204 | ret = ib_mad_port_start(port_priv); |
| 3205 | if (ret) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3206 | dev_err(&device->dev, "Couldn't start port\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3207 | goto error9; |
| 3208 | } |
| 3209 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3210 | return 0; |
| 3211 | |
| 3212 | error9: |
Michael S. Tsirkin | dc05980 | 2006-03-20 10:08:25 -0800 | [diff] [blame] | 3213 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
| 3214 | list_del_init(&port_priv->port_list); |
| 3215 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); |
| 3216 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3217 | destroy_workqueue(port_priv->wq); |
| 3218 | error8: |
| 3219 | destroy_mad_qp(&port_priv->qp_info[1]); |
| 3220 | error7: |
| 3221 | destroy_mad_qp(&port_priv->qp_info[0]); |
| 3222 | error6: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3223 | ib_dealloc_pd(port_priv->pd); |
| 3224 | error4: |
Christoph Hellwig | d53e11f | 2016-01-05 22:46:12 -0800 | [diff] [blame] | 3225 | ib_free_cq(port_priv->cq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3226 | cleanup_recv_queue(&port_priv->qp_info[1]); |
| 3227 | cleanup_recv_queue(&port_priv->qp_info[0]); |
| 3228 | error3: |
| 3229 | kfree(port_priv); |
| 3230 | |
| 3231 | return ret; |
| 3232 | } |
| 3233 | |
| 3234 | /* |
| 3235 | * Close the port |
| 3236 | * If there are no classes using the port, free the port |
| 3237 |  * resources (workqueue, QP, PD, CQ) and remove the port's info structure
| 3238 | */ |
| 3239 | static int ib_mad_port_close(struct ib_device *device, int port_num) |
| 3240 | { |
| 3241 | struct ib_mad_port_private *port_priv; |
| 3242 | unsigned long flags; |
| 3243 | |
| 3244 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
| 3245 | port_priv = __ib_get_mad_port(device, port_num); |
| 3246 | if (!port_priv) {
| 3247 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3248 | dev_err(&device->dev, "Port %d not found\n", port_num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3249 | return -ENODEV; |
| 3250 | } |
Michael S. Tsirkin | dc05980 | 2006-03-20 10:08:25 -0800 | [diff] [blame] | 3251 | list_del_init(&port_priv->port_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3252 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); |
| 3253 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3254 | destroy_workqueue(port_priv->wq); |
| 3255 | destroy_mad_qp(&port_priv->qp_info[1]); |
| 3256 | destroy_mad_qp(&port_priv->qp_info[0]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3257 | ib_dealloc_pd(port_priv->pd); |
Christoph Hellwig | d53e11f | 2016-01-05 22:46:12 -0800 | [diff] [blame] | 3258 | ib_free_cq(port_priv->cq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3259 | cleanup_recv_queue(&port_priv->qp_info[1]); |
| 3260 | cleanup_recv_queue(&port_priv->qp_info[0]); |
| 3261 | /* XXX: Handle deallocation of MAD registration tables */ |
| 3262 | |
| 3263 | kfree(port_priv); |
| 3264 | |
| 3265 | return 0; |
| 3266 | } |
| 3267 | |
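| | /*
| |  * Open a MAD port and its agents on every port of the device that
| |  * supports IB MADs, unwinding all opened ports on failure
| |  */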
| 3268 | static void ib_mad_init_device(struct ib_device *device) |
| 3269 | { |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3270 | int start, i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3271 | |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3272 | start = rdma_start_port(device); |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3273 | |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3274 | for (i = start; i <= rdma_end_port(device); i++) { |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3275 | if (!rdma_cap_ib_mad(device, i)) |
Michael Wang | 827f2a8 | 2015-05-05 14:50:20 +0200 | [diff] [blame] | 3276 | continue; |
| 3277 | |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3278 | if (ib_mad_port_open(device, i)) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3279 | dev_err(&device->dev, "Couldn't open port %d\n", i); |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3280 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3281 | } |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3282 | if (ib_agent_port_open(device, i)) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3283 | dev_err(&device->dev, |
| 3284 | "Couldn't open port %d for agents\n", i); |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3285 | goto error_agent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3286 | } |
| 3287 | } |
Hal Rosenstock | f68bcc2 | 2005-07-27 11:45:27 -0700 | [diff] [blame] | 3288 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3289 | |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3290 | error_agent: |
| 3291 | if (ib_mad_port_close(device, i)) |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3292 | dev_err(&device->dev, "Couldn't close port %d\n", i); |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3293 | |
| 3294 | error: |
Michael Wang | 827f2a8 | 2015-05-05 14:50:20 +0200 | [diff] [blame] | 3295 | while (--i >= start) { |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3296 | if (!rdma_cap_ib_mad(device, i)) |
Michael Wang | 827f2a8 | 2015-05-05 14:50:20 +0200 | [diff] [blame] | 3297 | continue; |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3298 | |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3299 | if (ib_agent_port_close(device, i)) |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3300 | dev_err(&device->dev, |
| 3301 | "Couldn't close port %d for agents\n", i); |
Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 3302 | if (ib_mad_port_close(device, i)) |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3303 | dev_err(&device->dev, "Couldn't close port %d\n", i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3304 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3305 | } |
| 3306 | |
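| | /* Close the agents and the MAD port on every MAD-capable port */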
Haggai Eran | 7c1eb45 | 2015-07-30 17:50:14 +0300 | [diff] [blame] | 3307 | static void ib_mad_remove_device(struct ib_device *device, void *client_data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3308 | { |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3309 | int i; |
Steve Wise | 070e140 | 2010-03-04 18:18:18 +0000 | [diff] [blame] | 3310 | |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3311 | for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3312 | if (!rdma_cap_ib_mad(device, i)) |
Michael Wang | 827f2a8 | 2015-05-05 14:50:20 +0200 | [diff] [blame] | 3313 | continue; |
| 3314 | |
| 3315 | if (ib_agent_port_close(device, i)) |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3316 | dev_err(&device->dev, |
Michael Wang | 827f2a8 | 2015-05-05 14:50:20 +0200 | [diff] [blame] | 3317 | "Couldn't close port %d for agents\n", i); |
| 3318 | if (ib_mad_port_close(device, i)) |
| 3319 | dev_err(&device->dev, "Couldn't close port %d\n", i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3320 | } |
| 3321 | } |
| 3322 | |
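| | /* Registered with the IB core to be notified of device addition and removal */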
| 3323 | static struct ib_client mad_client = { |
| 3324 | .name = "mad", |
| 3325 | .add = ib_mad_init_device, |
| 3326 | .remove = ib_mad_remove_device |
| 3327 | }; |
| 3328 | |
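| | /*
| |  * Clamp the module's queue size parameters to supported bounds and
| |  * register the MAD client with the IB core
| |  */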
Mark Bloch | 4c2cb42 | 2016-05-19 17:12:32 +0300 | [diff] [blame] | 3329 | int ib_mad_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3330 | { |
Hal Rosenstock | b76aabc | 2009-09-07 08:28:48 -0700 | [diff] [blame] | 3331 | mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); |
| 3332 | mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); |
| 3333 | |
| 3334 | mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); |
| 3335 | mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); |
| 3336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3337 | INIT_LIST_HEAD(&ib_mad_port_list); |
| 3338 | |
| 3339 | if (ib_register_client(&mad_client)) { |
Ira Weiny | 7ef5d4b | 2014-08-08 19:00:53 -0400 | [diff] [blame] | 3340 | pr_err("Couldn't register ib_mad client\n"); |
Ira Weiny | c9082e5 | 2015-06-06 14:38:30 -0400 | [diff] [blame] | 3341 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3342 | } |
| 3343 | |
| 3344 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3345 | } |
| 3346 | |
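| | /* Unregistering the client invokes ib_mad_remove_device for each device */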
Mark Bloch | 4c2cb42 | 2016-05-19 17:12:32 +0300 | [diff] [blame] | 3347 | void ib_mad_cleanup(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3348 | { |
| 3349 | ib_unregister_client(&mad_client); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3350 | } |