/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");


kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;


/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

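/*
 * Map a special QP type to its index in the port's qp_info[] array:
 * 0 for the SMI QP (QP0), 1 for the GSI QP (QP1), -1 otherwise.
 */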
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

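/*
 * Helpers for management classes in the second ("new") vendor-defined
 * range, which are registered per OUI rather than per class alone.
 */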
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		   mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

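/*
 * Add a snoop agent to the QP's snoop table, growing the table by one
 * slot if no free entry exists.  Returns the table index used or a
 * negative errno.
 */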
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses even though the response bit isn't set */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

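/*
 * Build a synthetic receive work completion for an SMP that is
 * processed locally rather than posted to QP0.
 */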
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	if (!ret || !device->process_mad)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

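/*
 * Total send buffer length: the class header plus the data padded out
 * to fill the final segment (a segment is the MAD payload remaining
 * after the class-specific header).
 */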
static int get_buf_length(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
			pad = 0;
	} else
		pad = seg_size;
	return hdr_len + data_len + pad;
}

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int buf_size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	buf_size = get_buf_length(hdr_len, data_len);

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + buf_size;
	mad_send_wr->send_buf.mad = buf;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = buf_size;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 1;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
						   IB_MGMT_RMPP_HDR + data_len);
		rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
				  IB_MGMT_RMPP_FLAG_ACTIVE);
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	kfree(send_buf->mad);

	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
}
EXPORT_SYMBOL(ib_free_send_mad);

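/*
 * DMA-map the send buffer and post it to the QP's send queue, or park
 * the work request on the overflow list if the send queue is already
 * full.
 */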
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge->addr = dma_map_single(mad_agent->device->dma_device,
				   mad_send_wr->send_buf.mad, sge->length,
				   DMA_TO_DEVICE);
	pci_unmap_addr_set(mad_send_wr, mapping, sge->addr);

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret)
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, mapping),
				 sge->length, DMA_TO_DEVICE);

	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->retries = send_buf->retries;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

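/*
 * Check whether any method requested in mad_reg_req is already claimed
 * by another agent in the given method table.
 */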
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
 | 1092 | 			return i; | 
 | 1093 |  | 
 | 1094 | 	return -1; | 
 | 1095 | } | 
 | 1096 |  | 
 | 1097 | static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) | 
 | 1098 | { | 
 | 1099 | 	int i; | 
 | 1100 |  | 
 | 1101 | 	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) | 
 | 1102 | 		if (vendor->vendor_class[i]) | 
 | 1103 | 			return 1; | 
 | 1104 |  | 
 | 1105 | 	return 0; | 
 | 1106 | } | 
 | 1107 |  | 
 | 1108 | static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, | 
 | 1109 | 				     struct ib_mad_agent_private *agent) | 
 | 1110 | { | 
 | 1111 | 	int i; | 
 | 1112 |  | 
 | 1113 | 	/* Remove any methods for this mad agent */ | 
 | 1114 | 	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { | 
 | 1115 | 		if (method->agent[i] == agent) { | 
 | 1116 | 			method->agent[i] = NULL; | 
 | 1117 | 		} | 
 | 1118 | 	} | 
 | 1119 | } | 
 | 1120 |  | 
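 |  | /* | 
 |  |  * Register the requested methods for a management class without an OUI, | 
 |  |  * allocating the class and method tables as needed | 
 |  |  */ | 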
 | 1121 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 
 | 1122 | 			      struct ib_mad_agent_private *agent_priv, | 
 | 1123 | 			      u8 mgmt_class) | 
 | 1124 | { | 
 | 1125 | 	struct ib_mad_port_private *port_priv; | 
 | 1126 | 	struct ib_mad_mgmt_class_table **class; | 
 | 1127 | 	struct ib_mad_mgmt_method_table **method; | 
 | 1128 | 	int i, ret; | 
 | 1129 |  | 
 | 1130 | 	port_priv = agent_priv->qp_info->port_priv; | 
 | 1131 | 	class = &port_priv->version[mad_reg_req->mgmt_class_version].class; | 
 | 1132 | 	if (!*class) { | 
 | 1133 | 		/* Allocate management class table for "new" class version */ | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1134 | 		*class = kzalloc(sizeof **class, GFP_ATOMIC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | 		if (!*class) { | 
 | 1136 | 			printk(KERN_ERR PFX "No memory for " | 
 | 1137 | 			       "ib_mad_mgmt_class_table\n"); | 
 | 1138 | 			ret = -ENOMEM; | 
 | 1139 | 			goto error1; | 
 | 1140 | 		} | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1141 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | 		/* Allocate method table for this management class */ | 
 | 1143 | 		method = &(*class)->method_table[mgmt_class]; | 
 | 1144 | 		if ((ret = allocate_method_table(method))) | 
 | 1145 | 			goto error2; | 
 | 1146 | 	} else { | 
 | 1147 | 		method = &(*class)->method_table[mgmt_class]; | 
 | 1148 | 		if (!*method) { | 
 | 1149 | 			/* Allocate method table for this management class */ | 
 | 1150 | 			if ((ret = allocate_method_table(method))) | 
 | 1151 | 				goto error1; | 
 | 1152 | 		} | 
 | 1153 | 	} | 
 | 1154 |  | 
 | 1155 | 	/* Now, make sure methods are not already in use */ | 
 | 1156 | 	if (method_in_use(method, mad_reg_req)) | 
 | 1157 | 		goto error3; | 
 | 1158 |  | 
 | 1159 | 	/* Finally, add in methods being registered */ | 
 | 1160 | 	for (i = find_first_bit(mad_reg_req->method_mask, | 
 | 1161 | 				IB_MGMT_MAX_METHODS); | 
 | 1162 | 	     i < IB_MGMT_MAX_METHODS; | 
 | 1163 | 	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS, | 
 | 1164 | 			       1+i)) { | 
 | 1165 | 		(*method)->agent[i] = agent_priv; | 
 | 1166 | 	} | 
 | 1167 | 	return 0; | 
 | 1168 |  | 
 | 1169 | error3: | 
 | 1170 | 	/* Remove any methods for this mad agent */ | 
 | 1171 | 	remove_methods_mad_agent(*method, agent_priv); | 
 | 1172 | 	/* Now, check to see if there are any methods in use */ | 
 | 1173 | 	if (!check_method_table(*method)) { | 
 | 1174 | 		/* If not, release management method table */ | 
 | 1175 | 		kfree(*method); | 
 | 1176 | 		*method = NULL; | 
 | 1177 | 	} | 
 | 1178 | 	ret = -EINVAL; | 
 | 1179 | 	goto error1; | 
 | 1180 | error2: | 
 | 1181 | 	kfree(*class); | 
 | 1182 | 	*class = NULL; | 
 | 1183 | error1: | 
 | 1184 | 	return ret; | 
 | 1185 | } | 
 | 1186 |  | 
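 |  | /* | 
 |  |  * Register the requested methods for a vendor class with an OUI, | 
 |  |  * allocating the vendor class and method tables as needed | 
 |  |  */ | 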
 | 1187 | static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 
 | 1188 | 			   struct ib_mad_agent_private *agent_priv) | 
 | 1189 | { | 
 | 1190 | 	struct ib_mad_port_private *port_priv; | 
 | 1191 | 	struct ib_mad_mgmt_vendor_class_table **vendor_table; | 
 | 1192 | 	struct ib_mad_mgmt_vendor_class_table *vendor = NULL; | 
 | 1193 | 	struct ib_mad_mgmt_vendor_class *vendor_class = NULL; | 
 | 1194 | 	struct ib_mad_mgmt_method_table **method; | 
 | 1195 | 	int i, ret = -ENOMEM; | 
 | 1196 | 	u8 vclass; | 
 | 1197 |  | 
 | 1198 | 	/* "New" vendor (with OUI) class */ | 
 | 1199 | 	vclass = vendor_class_index(mad_reg_req->mgmt_class); | 
 | 1200 | 	port_priv = agent_priv->qp_info->port_priv; | 
 | 1201 | 	vendor_table = &port_priv->version[ | 
 | 1202 | 				mad_reg_req->mgmt_class_version].vendor; | 
 | 1203 | 	if (!*vendor_table) { | 
 | 1204 | 		/* Allocate mgmt vendor class table for "new" class version */ | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1205 | 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | 		if (!vendor) { | 
 | 1207 | 			printk(KERN_ERR PFX "No memory for " | 
 | 1208 | 			       "ib_mad_mgmt_vendor_class_table\n"); | 
 | 1209 | 			goto error1; | 
 | 1210 | 		} | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1211 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | 		*vendor_table = vendor; | 
 | 1213 | 	} | 
 | 1214 | 	if (!(*vendor_table)->vendor_class[vclass]) { | 
 | 1215 | 		/* Allocate table for this management vendor class */ | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1216 | 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | 		if (!vendor_class) { | 
 | 1218 | 			printk(KERN_ERR PFX "No memory for " | 
 | 1219 | 			       "ib_mad_mgmt_vendor_class\n"); | 
 | 1220 | 			goto error2; | 
 | 1221 | 		} | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 1222 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | 		(*vendor_table)->vendor_class[vclass] = vendor_class; | 
 | 1224 | 	} | 
 | 1225 | 	for (i = 0; i < MAX_MGMT_OUI; i++) { | 
 | 1226 | 		/* Is there a matching OUI for this vendor class? */ | 
 | 1227 | 		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], | 
 | 1228 | 			    mad_reg_req->oui, 3)) { | 
 | 1229 | 			method = &(*vendor_table)->vendor_class[ | 
 | 1230 | 						vclass]->method_table[i]; | 
 | 1231 | 			BUG_ON(!*method); | 
 | 1232 | 			goto check_in_use; | 
 | 1233 | 		} | 
 | 1234 | 	} | 
 | 1235 | 	for (i = 0; i < MAX_MGMT_OUI; i++) { | 
 | 1236 | 		/* OUI slot available? */ | 
 | 1237 | 		if (!is_vendor_oui((*vendor_table)->vendor_class[ | 
 | 1238 | 				vclass]->oui[i])) { | 
 | 1239 | 			method = &(*vendor_table)->vendor_class[ | 
 | 1240 | 				vclass]->method_table[i]; | 
 | 1241 | 			BUG_ON(*method); | 
 | 1242 | 			/* Allocate method table for this OUI */ | 
 | 1243 | 			if ((ret = allocate_method_table(method))) | 
 | 1244 | 				goto error3; | 
 | 1245 | 			memcpy((*vendor_table)->vendor_class[vclass]->oui[i], | 
 | 1246 | 			       mad_reg_req->oui, 3); | 
 | 1247 | 			goto check_in_use; | 
 | 1248 | 		} | 
 | 1249 | 	} | 
 | 1250 | 	printk(KERN_ERR PFX "All OUI slots in use\n"); | 
 | 1251 | 	goto error3; | 
 | 1252 |  | 
 | 1253 | check_in_use: | 
 | 1254 | 	/* Now, make sure methods are not already in use */ | 
 | 1255 | 	if (method_in_use(method, mad_reg_req)) | 
 | 1256 | 		goto error4; | 
 | 1257 |  | 
 | 1258 | 	/* Finally, add in methods being registered */ | 
 | 1259 | 	for (i = find_first_bit(mad_reg_req->method_mask, | 
 | 1260 | 				IB_MGMT_MAX_METHODS); | 
 | 1261 | 	     i < IB_MGMT_MAX_METHODS; | 
 | 1262 | 	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS, | 
 | 1263 | 			       1+i)) { | 
 | 1264 | 		(*method)->agent[i] = agent_priv; | 
 | 1265 | 	} | 
 | 1266 | 	return 0; | 
 | 1267 |  | 
 | 1268 | error4: | 
 | 1269 | 	/* Remove any methods for this mad agent */ | 
 | 1270 | 	remove_methods_mad_agent(*method, agent_priv); | 
 | 1271 | 	/* Now, check to see if there are any methods in use */ | 
 | 1272 | 	if (!check_method_table(*method)) { | 
 | 1273 | 		/* If not, release management method table */ | 
 | 1274 | 		kfree(*method); | 
 | 1275 | 		*method = NULL; | 
 | 1276 | 	} | 
 | 1277 | 	ret = -EINVAL; | 
 | 1278 | error3: | 
 | 1279 | 	if (vendor_class) { | 
 | 1280 | 		(*vendor_table)->vendor_class[vclass] = NULL; | 
 | 1281 | 		kfree(vendor_class); | 
 | 1282 | 	} | 
 | 1283 | error2: | 
 | 1284 | 	if (vendor) { | 
 | 1285 | 		*vendor_table = NULL; | 
 | 1286 | 		kfree(vendor); | 
 | 1287 | 	} | 
 | 1288 | error1: | 
 | 1289 | 	return ret; | 
 | 1290 | } | 
 | 1291 |  | 
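 |  | /* | 
 |  |  * Remove an agent's method registrations, freeing any class, vendor | 
 |  |  * class, and method tables that become empty | 
 |  |  */ | 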
 | 1292 | static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | 
 | 1293 | { | 
 | 1294 | 	struct ib_mad_port_private *port_priv; | 
 | 1295 | 	struct ib_mad_mgmt_class_table *class; | 
 | 1296 | 	struct ib_mad_mgmt_method_table *method; | 
 | 1297 | 	struct ib_mad_mgmt_vendor_class_table *vendor; | 
 | 1298 | 	struct ib_mad_mgmt_vendor_class *vendor_class; | 
 | 1299 | 	int index; | 
 | 1300 | 	u8 mgmt_class; | 
 | 1301 |  | 
 | 1302 | 	/* | 
 | 1303 | 	 * Was a MAD registration request supplied | 
 | 1304 | 	 * with the original registration? | 
 | 1305 | 	 */ | 
 | 1306 | 	if (!agent_priv->reg_req) { | 
 | 1307 | 		goto out; | 
 | 1308 | 	} | 
 | 1309 |  | 
 | 1310 | 	port_priv = agent_priv->qp_info->port_priv; | 
 | 1311 | 	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); | 
 | 1312 | 	class = port_priv->version[ | 
 | 1313 | 			agent_priv->reg_req->mgmt_class_version].class; | 
 | 1314 | 	if (!class) | 
 | 1315 | 		goto vendor_check; | 
 | 1316 |  | 
 | 1317 | 	method = class->method_table[mgmt_class]; | 
 | 1318 | 	if (method) { | 
 | 1319 | 		/* Remove any methods for this mad agent */ | 
 | 1320 | 		remove_methods_mad_agent(method, agent_priv); | 
 | 1321 | 		/* Now, check to see if there are any methods still in use */ | 
 | 1322 | 		if (!check_method_table(method)) { | 
 | 1323 | 			/* If not, release management method table */ | 
 | 1324 | 			kfree(method); | 
 | 1325 | 			class->method_table[mgmt_class] = NULL; | 
 | 1326 | 			/* Any management classes left? */ | 
 | 1327 | 			if (!check_class_table(class)) { | 
 | 1328 | 				/* If not, release management class table */ | 
 | 1329 | 				kfree(class); | 
 | 1330 | 				port_priv->version[ | 
 | 1331 | 					agent_priv->reg_req-> | 
 | 1332 | 					mgmt_class_version].class = NULL; | 
 | 1333 | 			} | 
 | 1334 | 		} | 
 | 1335 | 	} | 
 | 1336 |  | 
 | 1337 | vendor_check: | 
 | 1338 | 	if (!is_vendor_class(mgmt_class)) | 
 | 1339 | 		goto out; | 
 | 1340 |  | 
 | 1341 | 	/* normalize mgmt_class to vendor range 2 */ | 
 | 1342 | 	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); | 
 | 1343 | 	vendor = port_priv->version[ | 
 | 1344 | 			agent_priv->reg_req->mgmt_class_version].vendor; | 
 | 1345 |  | 
 | 1346 | 	if (!vendor) | 
 | 1347 | 		goto out; | 
 | 1348 |  | 
 | 1349 | 	vendor_class = vendor->vendor_class[mgmt_class]; | 
 | 1350 | 	if (vendor_class) { | 
 | 1351 | 		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); | 
 | 1352 | 		if (index < 0) | 
 | 1353 | 			goto out; | 
 | 1354 | 		method = vendor_class->method_table[index]; | 
 | 1355 | 		if (method) { | 
 | 1356 | 			/* Remove any methods for this mad agent */ | 
 | 1357 | 			remove_methods_mad_agent(method, agent_priv); | 
 | 1358 | 			/* | 
 | 1359 | 			 * Now, check to see if there are | 
 | 1360 | 			 * any methods still in use | 
 | 1361 | 			 */ | 
 | 1362 | 			if (!check_method_table(method)) { | 
 | 1363 | 				/* If not, release management method table */ | 
 | 1364 | 				kfree(method); | 
 | 1365 | 				vendor_class->method_table[index] = NULL; | 
 | 1366 | 				memset(vendor_class->oui[index], 0, 3); | 
 | 1367 | 				/* Any OUIs left? */ | 
 | 1368 | 				if (!check_vendor_class(vendor_class)) { | 
 | 1369 | 					/* If not, release vendor class table */ | 
 | 1370 | 					kfree(vendor_class); | 
 | 1371 | 					vendor->vendor_class[mgmt_class] = NULL; | 
 | 1372 | 					/* Any other vendor classes left? */ | 
 | 1373 | 					if (!check_vendor_table(vendor)) { | 
 | 1374 | 						kfree(vendor); | 
 | 1375 | 						port_priv->version[ | 
 | 1376 | 							agent_priv->reg_req-> | 
 | 1377 | 							mgmt_class_version]. | 
 | 1378 | 							vendor = NULL; | 
 | 1379 | 					} | 
 | 1380 | 				} | 
 | 1381 | 			} | 
 | 1382 | 		} | 
 | 1383 | 	} | 
 | 1384 |  | 
 | 1385 | out: | 
 | 1386 | 	return; | 
 | 1387 | } | 
 | 1388 |  | 
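 |  | /* | 
 |  |  * Find the agent that should receive a MAD: responses are routed by the | 
 |  |  * high 32 bits of the TID, requests by version, class, and method | 
 |  |  * (plus OUI for vendor classes) | 
 |  |  */ | 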
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1389 | static struct ib_mad_agent_private * | 
 | 1390 | find_mad_agent(struct ib_mad_port_private *port_priv, | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1391 | 	       struct ib_mad *mad) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | { | 
 | 1393 | 	struct ib_mad_agent_private *mad_agent = NULL; | 
 | 1394 | 	unsigned long flags; | 
 | 1395 |  | 
 | 1396 | 	spin_lock_irqsave(&port_priv->reg_lock, flags); | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1397 | 	if (response_mad(mad)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 | 		u32 hi_tid; | 
 | 1399 | 		struct ib_mad_agent_private *entry; | 
 | 1400 |  | 
 | 1401 | 		/* | 
 | 1402 | 		 * Routing is based on high 32 bits of transaction ID | 
 | 1403 | 		 * of MAD. | 
 | 1404 | 		 */ | 
 | 1405 | 		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1406 | 		list_for_each_entry(entry, &port_priv->agent_list, agent_list) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1407 | 			if (entry->agent.hi_tid == hi_tid) { | 
 | 1408 | 				mad_agent = entry; | 
 | 1409 | 				break; | 
 | 1410 | 			} | 
 | 1411 | 		} | 
 | 1412 | 	} else { | 
 | 1413 | 		struct ib_mad_mgmt_class_table *class; | 
 | 1414 | 		struct ib_mad_mgmt_method_table *method; | 
 | 1415 | 		struct ib_mad_mgmt_vendor_class_table *vendor; | 
 | 1416 | 		struct ib_mad_mgmt_vendor_class *vendor_class; | 
 | 1417 | 		struct ib_vendor_mad *vendor_mad; | 
 | 1418 | 		int index; | 
 | 1419 |  | 
 | 1420 | 		/* | 
 | 1421 | 		 * Routing is based on version, class, and method | 
 | 1422 | 		 * For "newer" vendor MADs, also based on OUI | 
 | 1423 | 		 */ | 
 | 1424 | 		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) | 
 | 1425 | 			goto out; | 
 | 1426 | 		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) { | 
 | 1427 | 			class = port_priv->version[ | 
 | 1428 | 					mad->mad_hdr.class_version].class; | 
 | 1429 | 			if (!class) | 
 | 1430 | 				goto out; | 
 | 1431 | 			method = class->method_table[convert_mgmt_class( | 
 | 1432 | 							mad->mad_hdr.mgmt_class)]; | 
 | 1433 | 			if (method) | 
 | 1434 | 				mad_agent = method->agent[mad->mad_hdr.method & | 
 | 1435 | 							  ~IB_MGMT_METHOD_RESP]; | 
 | 1436 | 		} else { | 
 | 1437 | 			vendor = port_priv->version[ | 
 | 1438 | 					mad->mad_hdr.class_version].vendor; | 
 | 1439 | 			if (!vendor) | 
 | 1440 | 				goto out; | 
 | 1441 | 			vendor_class = vendor->vendor_class[vendor_class_index( | 
 | 1442 | 						mad->mad_hdr.mgmt_class)]; | 
 | 1443 | 			if (!vendor_class) | 
 | 1444 | 				goto out; | 
 | 1445 | 			/* Find matching OUI */ | 
 | 1446 | 			vendor_mad = (struct ib_vendor_mad *)mad; | 
 | 1447 | 			index = find_vendor_oui(vendor_class, vendor_mad->oui); | 
 | 1448 | 			if (index == -1) | 
 | 1449 | 				goto out; | 
 | 1450 | 			method = vendor_class->method_table[index]; | 
 | 1451 | 			if (method) { | 
 | 1452 | 				mad_agent = method->agent[mad->mad_hdr.method & | 
 | 1453 | 							  ~IB_MGMT_METHOD_RESP]; | 
 | 1454 | 			} | 
 | 1455 | 		} | 
 | 1456 | 	} | 
 | 1457 |  | 
 | 1458 | 	if (mad_agent) { | 
 | 1459 | 		if (mad_agent->agent.recv_handler) | 
 | 1460 | 			atomic_inc(&mad_agent->refcount); | 
 | 1461 | 		else { | 
 | 1462 | 			printk(KERN_NOTICE PFX "No receive handler for client " | 
 | 1463 | 			       "%p on port %d\n", | 
 | 1464 | 			       &mad_agent->agent, port_priv->port_num); | 
 | 1465 | 			mad_agent = NULL; | 
 | 1466 | 		} | 
 | 1467 | 	} | 
 | 1468 | out: | 
 | 1469 | 	spin_unlock_irqrestore(&port_priv->reg_lock, flags); | 
 | 1470 |  | 
 | 1471 | 	return mad_agent; | 
 | 1472 | } | 
 | 1473 |  | 
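 |  | /* | 
 |  |  * Validate a received MAD: check the base version and that the | 
 |  |  * management class is appropriate for the QP it arrived on | 
 |  |  */ | 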
 | 1474 | static int validate_mad(struct ib_mad *mad, u32 qp_num) | 
 | 1475 | { | 
 | 1476 | 	int valid = 0; | 
 | 1477 |  | 
 | 1478 | 	/* Make sure MAD base version is understood */ | 
 | 1479 | 	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { | 
 | 1480 | 		printk(KERN_ERR PFX "MAD received with unsupported base " | 
 | 1481 | 		       "version %d\n", mad->mad_hdr.base_version); | 
 | 1482 | 		goto out; | 
 | 1483 | 	} | 
 | 1484 |  | 
 | 1485 | 	/* Filter SMI packets sent to other than QP0 */ | 
 | 1486 | 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || | 
 | 1487 | 	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { | 
 | 1488 | 		if (qp_num == 0) | 
 | 1489 | 			valid = 1; | 
 | 1490 | 	} else { | 
 | 1491 | 		/* Filter GSI packets sent to QP0 */ | 
 | 1492 | 		if (qp_num != 0) | 
 | 1493 | 			valid = 1; | 
 | 1494 | 	} | 
 | 1495 |  | 
 | 1496 | out: | 
 | 1497 | 	return valid; | 
 | 1498 | } | 
 | 1499 |  | 
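 |  | /* Return nonzero unless the MAD is an active RMPP MAD of a type other than DATA */ | 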
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1500 | static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, | 
 | 1501 | 		       struct ib_mad_hdr *mad_hdr) | 
 | 1502 | { | 
 | 1503 | 	struct ib_rmpp_mad *rmpp_mad; | 
 | 1504 |  | 
 | 1505 | 	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; | 
 | 1506 | 	return !mad_agent_priv->agent.rmpp_version || | 
 | 1507 | 		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | 
 | 1508 | 				    IB_MGMT_RMPP_FLAG_ACTIVE) || | 
 | 1509 | 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | 
 | 1510 | } | 
 | 1511 |  | 
 | 1512 | struct ib_mad_send_wr_private* | 
| Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 1513 | ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | { | 
 | 1515 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 1516 |  | 
 | 1517 | 	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | 
 | 1518 | 			    agent_list) { | 
 | 1519 | 		if (mad_send_wr->tid == tid) | 
 | 1520 | 			return mad_send_wr; | 
 | 1521 | 	} | 
 | 1522 |  | 
 | 1523 | 	/* | 
 | 1524 | 	 * It's possible to receive the response before we've | 
 | 1525 | 	 * been notified that the send has completed | 
 | 1526 | 	 */ | 
 | 1527 | 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 
 | 1528 | 			    agent_list) { | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1529 | 		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1530 | 		    mad_send_wr->tid == tid && mad_send_wr->timeout) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | 			/* Verify request has not been canceled */ | 
 | 1532 | 			return (mad_send_wr->status == IB_WC_SUCCESS) ? | 
 | 1533 | 				mad_send_wr : NULL; | 
 | 1534 | 		} | 
 | 1535 | 	} | 
 | 1536 | 	return NULL; | 
 | 1537 | } | 
 | 1538 |  | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1539 | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) | 
| Hal Rosenstock | 6a0c435 | 2005-07-27 11:45:26 -0700 | [diff] [blame] | 1540 | { | 
 | 1541 | 	mad_send_wr->timeout = 0; | 
 | 1542 | 	if (mad_send_wr->refcount == 1) { | 
 | 1543 | 		list_del(&mad_send_wr->agent_list); | 
 | 1544 | 		list_add_tail(&mad_send_wr->agent_list, | 
 | 1545 | 			      &mad_send_wr->mad_agent_priv->done_list); | 
 | 1546 | 	} | 
 | 1547 | } | 
 | 1548 |  | 
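 |  | /* | 
 |  |  * Deliver a received MAD to the agent; for a response, complete the | 
 |  |  * matching send request as well | 
 |  |  */ | 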
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1550 | 				 struct ib_mad_recv_wc *mad_recv_wc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | { | 
 | 1552 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 1553 | 	struct ib_mad_send_wc mad_send_wc; | 
 | 1554 | 	unsigned long flags; | 
| Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 1555 | 	__be64 tid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 |  | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1557 | 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); | 
 | 1558 | 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); | 
 | 1559 | 	if (mad_agent_priv->agent.rmpp_version) { | 
 | 1560 | 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, | 
 | 1561 | 						      mad_recv_wc); | 
 | 1562 | 		if (!mad_recv_wc) { | 
 | 1563 | 			if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 
 | 1564 | 				wake_up(&mad_agent_priv->wait); | 
 | 1565 | 			return; | 
 | 1566 | 		} | 
 | 1567 | 	} | 
 | 1568 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | 	/* Complete corresponding request */ | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1570 | 	if (response_mad(mad_recv_wc->recv_buf.mad)) { | 
 | 1571 | 		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | 		spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1573 | 		mad_send_wr = ib_find_send_mad(mad_agent_priv, tid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | 		if (!mad_send_wr) { | 
 | 1575 | 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1576 | 			ib_free_recv_mad(mad_recv_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 | 			if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 
 | 1578 | 				wake_up(&mad_agent_priv->wait); | 
 | 1579 | 			return; | 
 | 1580 | 		} | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1581 | 		ib_mark_mad_done(mad_send_wr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 1583 |  | 
 | 1584 | 		/* Defined behavior is to complete response before request */ | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1585 | 		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1586 | 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | 
 | 1587 | 						   mad_recv_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | 		atomic_dec(&mad_agent_priv->refcount); | 
 | 1589 |  | 
 | 1590 | 		mad_send_wc.status = IB_WC_SUCCESS; | 
 | 1591 | 		mad_send_wc.vendor_err = 0; | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1592 | 		mad_send_wc.send_buf = &mad_send_wr->send_buf; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | 		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | 
 | 1594 | 	} else { | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1595 | 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | 
 | 1596 | 						   mad_recv_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | 		if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 
 | 1598 | 			wake_up(&mad_agent_priv->wait); | 
 | 1599 | 	} | 
 | 1600 | } | 
 | 1601 |  | 
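 |  | /* | 
 |  |  * Process a receive completion: unmap the buffer, validate and route | 
 |  |  * the MAD, and post another receive for the QP | 
 |  |  */ | 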
 | 1602 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | 
 | 1603 | 				     struct ib_wc *wc) | 
 | 1604 | { | 
 | 1605 | 	struct ib_mad_qp_info *qp_info; | 
 | 1606 | 	struct ib_mad_private_header *mad_priv_hdr; | 
 | 1607 | 	struct ib_mad_private *recv, *response; | 
 | 1608 | 	struct ib_mad_list_head *mad_list; | 
 | 1609 | 	struct ib_mad_agent_private *mad_agent; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1610 |  | 
 | 1611 | 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); | 
 | 1612 | 	if (!response) | 
 | 1613 | 		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory " | 
 | 1614 | 		       "for response buffer\n"); | 
 | 1615 |  | 
 | 1616 | 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | 
 | 1617 | 	qp_info = mad_list->mad_queue->qp_info; | 
 | 1618 | 	dequeue_mad(mad_list); | 
 | 1619 |  | 
 | 1620 | 	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, | 
 | 1621 | 				    mad_list); | 
 | 1622 | 	recv = container_of(mad_priv_hdr, struct ib_mad_private, header); | 
 | 1623 | 	dma_unmap_single(port_priv->device->dma_device, | 
 | 1624 | 			 pci_unmap_addr(&recv->header, mapping), | 
 | 1625 | 			 sizeof(struct ib_mad_private) - | 
 | 1626 | 			 sizeof(struct ib_mad_private_header), | 
 | 1627 | 			 DMA_FROM_DEVICE); | 
 | 1628 |  | 
 | 1629 | 	/* Setup MAD receive work completion from "normal" work completion */ | 
| Sean Hefty | 24239af | 2005-04-16 15:26:08 -0700 | [diff] [blame] | 1630 | 	recv->header.wc = *wc; | 
 | 1631 | 	recv->header.recv_wc.wc = &recv->header.wc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | 	recv->header.recv_wc.mad_len = sizeof(struct ib_mad); | 
 | 1633 | 	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad; | 
 | 1634 | 	recv->header.recv_wc.recv_buf.grh = &recv->grh; | 
 | 1635 |  | 
 | 1636 | 	if (atomic_read(&qp_info->snoop_count)) | 
 | 1637 | 		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); | 
 | 1638 |  | 
 | 1639 | 	/* Validate MAD */ | 
 | 1640 | 	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) | 
 | 1641 | 		goto out; | 
 | 1642 |  | 
 | 1643 | 	if (recv->mad.mad.mad_hdr.mgmt_class == | 
 | 1644 | 	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | 
 | 1645 | 		if (!smi_handle_dr_smp_recv(&recv->mad.smp, | 
 | 1646 | 					    port_priv->device->node_type, | 
 | 1647 | 					    port_priv->port_num, | 
 | 1648 | 					    port_priv->device->phys_port_cnt)) | 
 | 1649 | 			goto out; | 
 | 1650 | 		if (!smi_check_forward_dr_smp(&recv->mad.smp)) | 
 | 1651 | 			goto local; | 
 | 1652 | 		if (!smi_handle_dr_smp_send(&recv->mad.smp, | 
 | 1653 | 					    port_priv->device->node_type, | 
 | 1654 | 					    port_priv->port_num)) | 
 | 1655 | 			goto out; | 
 | 1656 | 		if (!smi_check_local_dr_smp(&recv->mad.smp, | 
 | 1657 | 					    port_priv->device, | 
 | 1658 | 					    port_priv->port_num)) | 
 | 1659 | 			goto out; | 
 | 1660 | 	} | 
 | 1661 |  | 
 | 1662 | local: | 
 | 1663 | 	/* Give driver "right of first refusal" on incoming MAD */ | 
 | 1664 | 	if (port_priv->device->process_mad) { | 
 | 1665 | 		int ret; | 
 | 1666 |  | 
 | 1667 | 		if (!response) { | 
 | 1668 | 			printk(KERN_ERR PFX "No memory for response MAD\n"); | 
 | 1669 | 			/* | 
 | 1670 | 			 * Is it better to assume that | 
 | 1671 | 			 * it wouldn't be processed? | 
 | 1672 | 			 */ | 
 | 1673 | 			goto out; | 
 | 1674 | 		} | 
 | 1675 |  | 
 | 1676 | 		ret = port_priv->device->process_mad(port_priv->device, 0, | 
 | 1677 | 						     port_priv->port_num, | 
 | 1678 | 						     wc, &recv->grh, | 
 | 1679 | 						     &recv->mad.mad, | 
 | 1680 | 						     &response->mad.mad); | 
 | 1681 | 		if (ret & IB_MAD_RESULT_SUCCESS) { | 
 | 1682 | 			if (ret & IB_MAD_RESULT_CONSUMED) | 
 | 1683 | 				goto out; | 
 | 1684 | 			if (ret & IB_MAD_RESULT_REPLY) { | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1685 | 				agent_send_response(&response->mad.mad, | 
 | 1686 | 						    &recv->grh, wc, | 
 | 1687 | 						    port_priv->device, | 
 | 1688 | 						    port_priv->port_num, | 
 | 1689 | 						    qp_info->qp->qp_num); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | 				goto out; | 
 | 1691 | 			} | 
 | 1692 | 		} | 
 | 1693 | 	} | 
 | 1694 |  | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1695 | 	mad_agent = find_mad_agent(port_priv, &recv->mad.mad); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1696 | 	if (mad_agent) { | 
| Hal Rosenstock | 4a0754f | 2005-07-27 11:45:24 -0700 | [diff] [blame] | 1697 | 		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | 		/* | 
 | 1699 | 		 * recv is freed up in error cases in ib_mad_complete_recv() | 
 | 1700 | 		 * or via the agent's recv_handler | 
 | 1701 | 		 */ | 
 | 1702 | 		recv = NULL; | 
 | 1703 | 	} | 
 | 1704 |  | 
 | 1705 | out: | 
 | 1706 | 	/* Post another receive request for this QP */ | 
 | 1707 | 	if (response) { | 
 | 1708 | 		ib_mad_post_receive_mads(qp_info, response); | 
 | 1709 | 		if (recv) | 
 | 1710 | 			kmem_cache_free(ib_mad_cache, recv); | 
 | 1711 | 	} else | 
 | 1712 | 		ib_mad_post_receive_mads(qp_info, recv); | 
 | 1713 | } | 
 | 1714 |  | 
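 |  | /* | 
 |  |  * Reschedule or cancel the timeout work based on the first request | 
 |  |  * on the wait list | 
 |  |  */ | 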
 | 1715 | static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | 
 | 1716 | { | 
 | 1717 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 1718 | 	unsigned long delay; | 
 | 1719 |  | 
 | 1720 | 	if (list_empty(&mad_agent_priv->wait_list)) { | 
 | 1721 | 		cancel_delayed_work(&mad_agent_priv->timed_work); | 
 | 1722 | 	} else { | 
 | 1723 | 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | 
 | 1724 | 					 struct ib_mad_send_wr_private, | 
 | 1725 | 					 agent_list); | 
 | 1726 |  | 
 | 1727 | 		if (time_after(mad_agent_priv->timeout, | 
 | 1728 | 			       mad_send_wr->timeout)) { | 
 | 1729 | 			mad_agent_priv->timeout = mad_send_wr->timeout; | 
 | 1730 | 			cancel_delayed_work(&mad_agent_priv->timed_work); | 
 | 1731 | 			delay = mad_send_wr->timeout - jiffies; | 
 | 1732 | 			if ((long)delay <= 0) | 
 | 1733 | 				delay = 1; | 
 | 1734 | 			queue_delayed_work(mad_agent_priv->qp_info-> | 
 | 1735 | 					   port_priv->wq, | 
 | 1736 | 					   &mad_agent_priv->timed_work, delay); | 
 | 1737 | 		} | 
 | 1738 | 	} | 
 | 1739 | } | 
 | 1740 |  | 
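 |  | /* | 
 |  |  * Move a send to the wait list, keeping the list sorted by timeout, | 
 |  |  * and reschedule the timeout work if this send now expires first | 
 |  |  */ | 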
| Hal Rosenstock | d760ce8 | 2005-07-27 11:45:25 -0700 | [diff] [blame] | 1741 | static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1742 | { | 
| Hal Rosenstock | d760ce8 | 2005-07-27 11:45:25 -0700 | [diff] [blame] | 1743 | 	struct ib_mad_agent_private *mad_agent_priv; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 | 	struct ib_mad_send_wr_private *temp_mad_send_wr; | 
 | 1745 | 	struct list_head *list_item; | 
 | 1746 | 	unsigned long delay; | 
 | 1747 |  | 
| Hal Rosenstock | d760ce8 | 2005-07-27 11:45:25 -0700 | [diff] [blame] | 1748 | 	mad_agent_priv = mad_send_wr->mad_agent_priv; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1749 | 	list_del(&mad_send_wr->agent_list); | 
 | 1750 |  | 
 | 1751 | 	delay = mad_send_wr->timeout; | 
 | 1752 | 	mad_send_wr->timeout += jiffies; | 
 | 1753 |  | 
| Hal Rosenstock | 29bb33d | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 1754 | 	if (delay) { | 
 | 1755 | 		list_for_each_prev(list_item, &mad_agent_priv->wait_list) { | 
 | 1756 | 			temp_mad_send_wr = list_entry(list_item, | 
 | 1757 | 						struct ib_mad_send_wr_private, | 
 | 1758 | 						agent_list); | 
 | 1759 | 			if (time_after(mad_send_wr->timeout, | 
 | 1760 | 				       temp_mad_send_wr->timeout)) | 
 | 1761 | 				break; | 
 | 1762 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | 	} | 
| Hal Rosenstock | 29bb33d | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 1764 | 	else | 
 | 1765 | 		list_item = &mad_agent_priv->wait_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 | 	list_add(&mad_send_wr->agent_list, list_item); | 
 | 1767 |  | 
 | 1768 | 	/* Reschedule a work item if we have a shorter timeout */ | 
 | 1769 | 	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { | 
 | 1770 | 		cancel_delayed_work(&mad_agent_priv->timed_work); | 
 | 1771 | 		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, | 
 | 1772 | 				   &mad_agent_priv->timed_work, delay); | 
 | 1773 | 	} | 
 | 1774 | } | 
 | 1775 |  | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 1776 | void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, | 
 | 1777 | 			  int timeout_ms) | 
 | 1778 | { | 
 | 1779 | 	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | 
 | 1780 | 	wait_for_response(mad_send_wr); | 
 | 1781 | } | 
 | 1782 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 | /* | 
 | 1784 |  * Process a send work completion | 
 | 1785 |  */ | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1786 | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | 
 | 1787 | 			     struct ib_mad_send_wc *mad_send_wc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1788 | { | 
 | 1789 | 	struct ib_mad_agent_private	*mad_agent_priv; | 
 | 1790 | 	unsigned long			flags; | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1791 | 	int				ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1792 |  | 
| Hal Rosenstock | d760ce8 | 2005-07-27 11:45:25 -0700 | [diff] [blame] | 1793 | 	mad_agent_priv = mad_send_wr->mad_agent_priv; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1794 | 	spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1795 | 	if (mad_agent_priv->agent.rmpp_version) { | 
 | 1796 | 		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); | 
 | 1797 | 		if (ret == IB_RMPP_RESULT_CONSUMED) | 
 | 1798 | 			goto done; | 
 | 1799 | 	} else | 
 | 1800 | 		ret = IB_RMPP_RESULT_UNHANDLED; | 
 | 1801 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 | 	if (mad_send_wc->status != IB_WC_SUCCESS && | 
 | 1803 | 	    mad_send_wr->status == IB_WC_SUCCESS) { | 
 | 1804 | 		mad_send_wr->status = mad_send_wc->status; | 
 | 1805 | 		mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | 
 | 1806 | 	} | 
 | 1807 |  | 
 | 1808 | 	if (--mad_send_wr->refcount > 0) { | 
 | 1809 | 		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && | 
 | 1810 | 		    mad_send_wr->status == IB_WC_SUCCESS) { | 
| Hal Rosenstock | d760ce8 | 2005-07-27 11:45:25 -0700 | [diff] [blame] | 1811 | 			wait_for_response(mad_send_wr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | 		} | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1813 | 		goto done; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1814 | 	} | 
 | 1815 |  | 
 | 1816 | 	/* Remove send from MAD agent and notify client of completion */ | 
 | 1817 | 	list_del(&mad_send_wr->agent_list); | 
 | 1818 | 	adjust_timeout(mad_agent_priv); | 
 | 1819 | 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 1820 |  | 
 | 1821 | 	if (mad_send_wr->status != IB_WC_SUCCESS) | 
 | 1822 | 		mad_send_wc->status = mad_send_wr->status; | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1823 | 	if (ret == IB_RMPP_RESULT_INTERNAL) | 
 | 1824 | 		ib_rmpp_send_handler(mad_send_wc); | 
 | 1825 | 	else | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1826 | 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 
 | 1827 | 						   mad_send_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1828 |  | 
 | 1829 | 	/* Release reference on agent taken when sending */ | 
 | 1830 | 	if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 
 | 1831 | 		wake_up(&mad_agent_priv->wait); | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 1832 | 	return; | 
 | 1833 | done: | 
 | 1834 | 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | } | 
 | 1836 |  | 
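 |  | /* | 
 |  |  * Handle a send completion from the CQ: unmap the buffer, move any | 
 |  |  * queued send from the overflow list, and complete the work request | 
 |  |  */ | 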
 | 1837 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | 
 | 1838 | 				     struct ib_wc *wc) | 
 | 1839 | { | 
 | 1840 | 	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr; | 
 | 1841 | 	struct ib_mad_list_head		*mad_list; | 
 | 1842 | 	struct ib_mad_qp_info		*qp_info; | 
 | 1843 | 	struct ib_mad_queue		*send_queue; | 
 | 1844 | 	struct ib_send_wr		*bad_send_wr; | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1845 | 	struct ib_mad_send_wc		mad_send_wc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 | 	unsigned long flags; | 
 | 1847 | 	int ret; | 
 | 1848 |  | 
 | 1849 | 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | 
 | 1850 | 	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | 
 | 1851 | 				   mad_list); | 
 | 1852 | 	send_queue = mad_list->mad_queue; | 
 | 1853 | 	qp_info = send_queue->qp_info; | 
 | 1854 |  | 
 | 1855 | retry: | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1856 | 	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, | 
 | 1857 | 			 pci_unmap_addr(mad_send_wr, mapping), | 
 | 1858 | 			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 | 	queued_send_wr = NULL; | 
 | 1860 | 	spin_lock_irqsave(&send_queue->lock, flags); | 
 | 1861 | 	list_del(&mad_list->list); | 
 | 1862 |  | 
 | 1863 | 	/* Move queued send to the send queue */ | 
 | 1864 | 	if (send_queue->count-- > send_queue->max_active) { | 
 | 1865 | 		mad_list = container_of(qp_info->overflow_list.next, | 
 | 1866 | 					struct ib_mad_list_head, list); | 
 | 1867 | 		queued_send_wr = container_of(mad_list, | 
 | 1868 | 					struct ib_mad_send_wr_private, | 
 | 1869 | 					mad_list); | 
 | 1870 | 		list_del(&mad_list->list); | 
 | 1871 | 		list_add_tail(&mad_list->list, &send_queue->list); | 
 | 1872 | 	} | 
 | 1873 | 	spin_unlock_irqrestore(&send_queue->lock, flags); | 
 | 1874 |  | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1875 | 	mad_send_wc.send_buf = &mad_send_wr->send_buf; | 
 | 1876 | 	mad_send_wc.status = wc->status; | 
 | 1877 | 	mad_send_wc.vendor_err = wc->vendor_err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1878 | 	if (atomic_read(&qp_info->snoop_count)) | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1879 | 		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1880 | 			   IB_MAD_SNOOP_SEND_COMPLETIONS); | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1881 | 	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 |  | 
 | 1883 | 	if (queued_send_wr) { | 
 | 1884 | 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 1885 | 				   &bad_send_wr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1886 | 		if (ret) { | 
 | 1887 | 			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); | 
 | 1888 | 			mad_send_wr = queued_send_wr; | 
 | 1889 | 			wc->status = IB_WC_LOC_QP_OP_ERR; | 
 | 1890 | 			goto retry; | 
 | 1891 | 		} | 
 | 1892 | 	} | 
 | 1893 | } | 
 | 1894 |  | 
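 |  | /* Flag all posted sends so that their flushed completions repost them */ | 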
 | 1895 | static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) | 
 | 1896 | { | 
 | 1897 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 1898 | 	struct ib_mad_list_head *mad_list; | 
 | 1899 | 	unsigned long flags; | 
 | 1900 |  | 
 | 1901 | 	spin_lock_irqsave(&qp_info->send_queue.lock, flags); | 
 | 1902 | 	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { | 
 | 1903 | 		mad_send_wr = container_of(mad_list, | 
 | 1904 | 					   struct ib_mad_send_wr_private, | 
 | 1905 | 					   mad_list); | 
 | 1906 | 		mad_send_wr->retry = 1; | 
 | 1907 | 	} | 
 | 1908 | 	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | 
 | 1909 | } | 
 | 1910 |  | 
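 |  | /* | 
 |  |  * Handle a completion error: receive errors are left to QP teardown; | 
 |  |  * send errors repost flushed requests or return the QP to RTS | 
 |  |  */ | 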
 | 1911 | static void mad_error_handler(struct ib_mad_port_private *port_priv, | 
 | 1912 | 			      struct ib_wc *wc) | 
 | 1913 | { | 
 | 1914 | 	struct ib_mad_list_head *mad_list; | 
 | 1915 | 	struct ib_mad_qp_info *qp_info; | 
 | 1916 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 1917 | 	int ret; | 
 | 1918 |  | 
 | 1919 | 	/* Determine if failure was a send or receive */ | 
 | 1920 | 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | 
 | 1921 | 	qp_info = mad_list->mad_queue->qp_info; | 
 | 1922 | 	if (mad_list->mad_queue == &qp_info->recv_queue) | 
 | 1923 | 		/* | 
 | 1924 | 		 * Receive errors indicate that the QP has entered the error | 
 | 1925 | 		 * state - error handling/shutdown code will cleanup | 
 | 1926 | 		 */ | 
 | 1927 | 		return; | 
 | 1928 |  | 
 | 1929 | 	/* | 
 | 1930 | 	 * Send errors will transition the QP to SQE - move | 
 | 1931 | 	 * QP to RTS and repost flushed work requests | 
 | 1932 | 	 */ | 
 | 1933 | 	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | 
 | 1934 | 				   mad_list); | 
 | 1935 | 	if (wc->status == IB_WC_WR_FLUSH_ERR) { | 
 | 1936 | 		if (mad_send_wr->retry) { | 
 | 1937 | 			/* Repost send */ | 
 | 1938 | 			struct ib_send_wr *bad_send_wr; | 
 | 1939 |  | 
 | 1940 | 			mad_send_wr->retry = 0; | 
 | 1941 | 			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, | 
 | 1942 | 					&bad_send_wr); | 
 | 1943 | 			if (ret) | 
 | 1944 | 				ib_mad_send_done_handler(port_priv, wc); | 
 | 1945 | 		} else | 
 | 1946 | 			ib_mad_send_done_handler(port_priv, wc); | 
 | 1947 | 	} else { | 
 | 1948 | 		struct ib_qp_attr *attr; | 
 | 1949 |  | 
 | 1950 | 		/* Transition QP to RTS and fail offending send */ | 
 | 1951 | 		attr = kmalloc(sizeof *attr, GFP_KERNEL); | 
 | 1952 | 		if (attr) { | 
 | 1953 | 			attr->qp_state = IB_QPS_RTS; | 
 | 1954 | 			attr->cur_qp_state = IB_QPS_SQE; | 
 | 1955 | 			ret = ib_modify_qp(qp_info->qp, attr, | 
 | 1956 | 					   IB_QP_STATE | IB_QP_CUR_STATE); | 
 | 1957 | 			kfree(attr); | 
 | 1958 | 			if (ret) | 
 | 1959 | 				printk(KERN_ERR PFX "mad_error_handler - " | 
 | 1960 | 				       "ib_modify_qp to RTS : %d\n", ret); | 
 | 1961 | 			else | 
 | 1962 | 				mark_sends_for_retry(qp_info); | 
 | 1963 | 		} | 
 | 1964 | 		ib_mad_send_done_handler(port_priv, wc); | 
 | 1965 | 	} | 
 | 1966 | } | 
 | 1967 |  | 
 | 1968 | /* | 
 | 1969 |  * IB MAD completion callback | 
 | 1970 |  */ | 
 | 1971 | static void ib_mad_completion_handler(void *data) | 
 | 1972 | { | 
 | 1973 | 	struct ib_mad_port_private *port_priv; | 
 | 1974 | 	struct ib_wc wc; | 
 | 1975 |  | 
 | 1976 | 	port_priv = (struct ib_mad_port_private *)data; | 
 | 1977 | 	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | 
 | 1978 |  | 
 | 1979 | 	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | 
 | 1980 | 		if (wc.status == IB_WC_SUCCESS) { | 
 | 1981 | 			switch (wc.opcode) { | 
 | 1982 | 			case IB_WC_SEND: | 
 | 1983 | 				ib_mad_send_done_handler(port_priv, &wc); | 
 | 1984 | 				break; | 
 | 1985 | 			case IB_WC_RECV: | 
 | 1986 | 				ib_mad_recv_done_handler(port_priv, &wc); | 
 | 1987 | 				break; | 
 | 1988 | 			default: | 
 | 1989 | 				BUG(); | 
 | 1990 | 				break; | 
 | 1991 | 			} | 
 | 1992 | 		} else | 
 | 1993 | 			mad_error_handler(port_priv, &wc); | 
 | 1994 | 	} | 
 | 1995 | } | 
 | 1996 |  | 
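 |  | /* | 
 |  |  * Cancel all outstanding sends for an agent and report them to the | 
 |  |  * client as flushed | 
 |  |  */ | 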
 | 1997 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | 
 | 1998 | { | 
 | 1999 | 	unsigned long flags; | 
 | 2000 | 	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; | 
 | 2001 | 	struct ib_mad_send_wc mad_send_wc; | 
 | 2002 | 	struct list_head cancel_list; | 
 | 2003 |  | 
 | 2004 | 	INIT_LIST_HEAD(&cancel_list); | 
 | 2005 |  | 
 | 2006 | 	spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
 | 2007 | 	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | 
 | 2008 | 				 &mad_agent_priv->send_list, agent_list) { | 
 | 2009 | 		if (mad_send_wr->status == IB_WC_SUCCESS) { | 
 | 2010 | 			mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | 
 | 2011 | 			mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | 
 | 2012 | 		} | 
 | 2013 | 	} | 
 | 2014 |  | 
 | 2015 | 	/* Empty wait list to prevent receives from finding a request */ | 
 | 2016 | 	list_splice_init(&mad_agent_priv->wait_list, &cancel_list); | 
| Hal Rosenstock | 2c153b9 | 2005-07-27 11:45:31 -0700 | [diff] [blame] | 2017 | 	/* Empty local completion list as well */ | 
 | 2018 | 	list_splice_init(&mad_agent_priv->local_list, &cancel_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2019 | 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 2020 |  | 
 | 2021 | 	/* Report all cancelled requests */ | 
 | 2022 | 	mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | 
 | 2023 | 	mad_send_wc.vendor_err = 0; | 
 | 2024 |  | 
 | 2025 | 	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | 
 | 2026 | 				 &cancel_list, agent_list) { | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2027 | 		mad_send_wc.send_buf = &mad_send_wr->send_buf; | 
 | 2028 | 		list_del(&mad_send_wr->agent_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2029 | 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 
 | 2030 | 						   &mad_send_wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2031 | 		atomic_dec(&mad_agent_priv->refcount); | 
 | 2032 | 	} | 
 | 2033 | } | 
 | 2034 |  | 
 | 2035 | static struct ib_mad_send_wr_private* | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2036 | find_send_wr(struct ib_mad_agent_private *mad_agent_priv, | 
 | 2037 | 	     struct ib_mad_send_buf *send_buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2038 | { | 
 | 2039 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 2040 |  | 
 | 2041 | 	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | 
 | 2042 | 			    agent_list) { | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2043 | 		if (&mad_send_wr->send_buf == send_buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2044 | 			return mad_send_wr; | 
 | 2045 | 	} | 
 | 2046 |  | 
 | 2047 | 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 
 | 2048 | 			    agent_list) { | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2049 | 		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && | 
 | 2050 | 		    &mad_send_wr->send_buf == send_buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2051 | 			return mad_send_wr; | 
 | 2052 | 	} | 
 | 2053 | 	return NULL; | 
 | 2054 | } | 
 | 2055 |  | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2056 | int ib_modify_mad(struct ib_mad_agent *mad_agent, | 
 | 2057 | 		  struct ib_mad_send_buf *send_buf, u32 timeout_ms) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2058 | { | 
 | 2059 | 	struct ib_mad_agent_private *mad_agent_priv; | 
 | 2060 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 2061 | 	unsigned long flags; | 
| Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 2062 | 	int active; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 |  | 
 | 2064 | 	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | 
 | 2065 | 				      agent); | 
 | 2066 | 	spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2067 | 	mad_send_wr = find_send_wr(mad_agent_priv, send_buf); | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2068 | 	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2069 | 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2070 | 		return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2071 | 	} | 
 | 2072 |  | 
| Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 2073 | 	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2074 | 	if (!timeout_ms) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | 		mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2076 | 		mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2077 | 	} | 
 | 2078 |  | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2079 | 	mad_send_wr->send_buf.timeout_ms = timeout_ms; | 
| Hal Rosenstock | cabe3cb | 2005-07-27 11:45:33 -0700 | [diff] [blame] | 2080 | 	if (active) | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2081 | 		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | 
 | 2082 | 	else | 
 | 2083 | 		ib_reset_mad_timeout(mad_send_wr, timeout_ms); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2084 |  | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2085 | 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 2086 | 	return 0; | 
 | 2087 | } | 
 | 2088 | EXPORT_SYMBOL(ib_modify_mad); | 
 | 2089 |  | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2090 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, | 
 | 2091 | 		   struct ib_mad_send_buf *send_buf) | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2092 | { | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2093 | 	ib_modify_mad(mad_agent, send_buf, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | } | 
 | 2095 | EXPORT_SYMBOL(ib_cancel_mad); | 
 | 2096 |  | 
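 |  | /* | 
 |  |  * Complete locally processed MADs: deliver the receive to the | 
 |  |  * destination agent, then report the send completion | 
 |  |  */ | 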
 | 2097 | static void local_completions(void *data) | 
 | 2098 | { | 
 | 2099 | 	struct ib_mad_agent_private *mad_agent_priv; | 
 | 2100 | 	struct ib_mad_local_private *local; | 
 | 2101 | 	struct ib_mad_agent_private *recv_mad_agent; | 
 | 2102 | 	unsigned long flags; | 
| Hal Rosenstock | 2c153b9 | 2005-07-27 11:45:31 -0700 | [diff] [blame] | 2103 | 	int recv = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2104 | 	struct ib_wc wc; | 
 | 2105 | 	struct ib_mad_send_wc mad_send_wc; | 
 | 2106 |  | 
 | 2107 | 	mad_agent_priv = (struct ib_mad_agent_private *)data; | 
 | 2108 |  | 
 | 2109 | 	spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
 | 2110 | 	while (!list_empty(&mad_agent_priv->local_list)) { | 
 | 2111 | 		local = list_entry(mad_agent_priv->local_list.next, | 
 | 2112 | 				   struct ib_mad_local_private, | 
 | 2113 | 				   completion_list); | 
 | 2114 | 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 2115 | 		if (local->mad_priv) { | 
 | 2116 | 			recv_mad_agent = local->recv_mad_agent; | 
 | 2117 | 			if (!recv_mad_agent) { | 
 | 2118 | 				printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2119 | 				goto local_send_completion; | 
 | 2120 | 			} | 
 | 2121 |  | 
| Hal Rosenstock | 2c153b9 | 2005-07-27 11:45:31 -0700 | [diff] [blame] | 2122 | 			recv = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2123 | 			/* | 
 | 2124 | 			 * Defined behavior is to complete response | 
 | 2125 | 			 * before request | 
 | 2126 | 			 */ | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2127 | 			build_smp_wc((unsigned long) local->mad_send_wr, | 
| Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 2128 | 				     be16_to_cpu(IB_LID_PERMISSIVE), | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2129 | 				     0, recv_mad_agent->agent.port_num, &wc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2130 |  | 
 | 2131 | 			local->mad_priv->header.recv_wc.wc = &wc; | 
 | 2132 | 			local->mad_priv->header.recv_wc.mad_len = | 
 | 2133 | 						sizeof(struct ib_mad); | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 2134 | 			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); | 
 | 2135 | 			list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | 
 | 2136 | 				 &local->mad_priv->header.recv_wc.rmpp_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2137 | 			local->mad_priv->header.recv_wc.recv_buf.grh = NULL; | 
 | 2138 | 			local->mad_priv->header.recv_wc.recv_buf.mad = | 
 | 2139 | 						&local->mad_priv->mad.mad; | 
 | 2140 | 			if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) | 
 | 2141 | 				snoop_recv(recv_mad_agent->qp_info, | 
 | 2142 | 					  &local->mad_priv->header.recv_wc, | 
 | 2143 | 					   IB_MAD_SNOOP_RECVS); | 
 | 2144 | 			recv_mad_agent->agent.recv_handler( | 
 | 2145 | 						&recv_mad_agent->agent, | 
 | 2146 | 						&local->mad_priv->header.recv_wc); | 
 | 2147 | 			spin_lock_irqsave(&recv_mad_agent->lock, flags); | 
 | 2148 | 			atomic_dec(&recv_mad_agent->refcount); | 
 | 2149 | 			spin_unlock_irqrestore(&recv_mad_agent->lock, flags); | 
 | 2150 | 		} | 
 | 2151 |  | 
 | 2152 | local_send_completion: | 
 | 2153 | 		/* Complete send */ | 
 | 2154 | 		mad_send_wc.status = IB_WC_SUCCESS; | 
 | 2155 | 		mad_send_wc.vendor_err = 0; | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2156 | 		mad_send_wc.send_buf = &local->mad_send_wr->send_buf; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2157 | 		if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2158 | 			snoop_send(mad_agent_priv->qp_info, | 
 | 2159 | 				   &local->mad_send_wr->send_buf, | 
 | 2160 | 				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2161 | 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 
 | 2162 | 						   &mad_send_wc); | 
 | 2163 |  | 
 | 2164 | 		spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
 | 2165 | 		list_del(&local->completion_list); | 
 | 2166 | 		atomic_dec(&mad_agent_priv->refcount); | 
| Hal Rosenstock | 2c153b9 | 2005-07-27 11:45:31 -0700 | [diff] [blame] | 2167 | 		if (!recv) | 
 | 2168 | 			kmem_cache_free(ib_mad_cache, local->mad_priv); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2169 | 		kfree(local); | 
 | 2170 | 	} | 
 | 2171 | 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 2172 | } | 
 | 2173 |  | 
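 |  | /* Resend a request that has timed out, if any retries remain */ | 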
| Hal Rosenstock | f75b7a5 | 2005-07-27 11:45:29 -0700 | [diff] [blame] | 2174 | static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | 
 | 2175 | { | 
 | 2176 | 	int ret; | 
 | 2177 |  | 
 | 2178 | 	if (!mad_send_wr->retries--) | 
 | 2179 | 		return -ETIMEDOUT; | 
 | 2180 |  | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2181 | 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); | 
| Hal Rosenstock | f75b7a5 | 2005-07-27 11:45:29 -0700 | [diff] [blame] | 2182 |  | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 2183 | 	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { | 
 | 2184 | 		ret = ib_retry_rmpp(mad_send_wr); | 
 | 2185 | 		switch (ret) { | 
 | 2186 | 		case IB_RMPP_RESULT_UNHANDLED: | 
 | 2187 | 			ret = ib_send_mad(mad_send_wr); | 
 | 2188 | 			break; | 
 | 2189 | 		case IB_RMPP_RESULT_CONSUMED: | 
 | 2190 | 			ret = 0; | 
 | 2191 | 			break; | 
 | 2192 | 		default: | 
 | 2193 | 			ret = -ECOMM; | 
 | 2194 | 			break; | 
 | 2195 | 		} | 
 | 2196 | 	} else | 
 | 2197 | 		ret = ib_send_mad(mad_send_wr); | 
| Hal Rosenstock | f75b7a5 | 2005-07-27 11:45:29 -0700 | [diff] [blame] | 2198 |  | 
 | 2199 | 	if (!ret) { | 
 | 2200 | 		mad_send_wr->refcount++; | 
| Hal Rosenstock | f75b7a5 | 2005-07-27 11:45:29 -0700 | [diff] [blame] | 2201 | 		list_add_tail(&mad_send_wr->agent_list, | 
 | 2202 | 			      &mad_send_wr->mad_agent_priv->send_list); | 
 | 2203 | 	} | 
 | 2204 | 	return ret; | 
 | 2205 | } | 
 | 2206 |  | 
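 |  | /* | 
 |  |  * Timeout work handler: retry or complete sends whose response | 
 |  |  * timeout has expired | 
 |  |  */ | 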
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2207 | static void timeout_sends(void *data) | 
 | 2208 | { | 
 | 2209 | 	struct ib_mad_agent_private *mad_agent_priv; | 
 | 2210 | 	struct ib_mad_send_wr_private *mad_send_wr; | 
 | 2211 | 	struct ib_mad_send_wc mad_send_wc; | 
 | 2212 | 	unsigned long flags, delay; | 
 | 2213 |  | 
 | 2214 | 	mad_agent_priv = (struct ib_mad_agent_private *)data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | 	mad_send_wc.vendor_err = 0; | 
 | 2216 |  | 
 | 2217 | 	spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
 | 2218 | 	while (!list_empty(&mad_agent_priv->wait_list)) { | 
 | 2219 | 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | 
 | 2220 | 					 struct ib_mad_send_wr_private, | 
 | 2221 | 					 agent_list); | 
 | 2222 |  | 
 | 2223 | 		if (time_after(mad_send_wr->timeout, jiffies)) { | 
 | 2224 | 			delay = mad_send_wr->timeout - jiffies; | 
 | 2225 | 			if ((long)delay <= 0) | 
 | 2226 | 				delay = 1; | 
 | 2227 | 			queue_delayed_work(mad_agent_priv->qp_info-> | 
 | 2228 | 					   port_priv->wq, | 
 | 2229 | 					   &mad_agent_priv->timed_work, delay); | 
 | 2230 | 			break; | 
 | 2231 | 		} | 
 | 2232 |  | 
| Hal Rosenstock | dbf9227 | 2005-07-27 11:45:30 -0700 | [diff] [blame] | 2233 | 		list_del(&mad_send_wr->agent_list); | 
| Hal Rosenstock | 29bb33d | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2234 | 		if (mad_send_wr->status == IB_WC_SUCCESS && | 
 | 2235 | 		    !retry_send(mad_send_wr)) | 
| Hal Rosenstock | f75b7a5 | 2005-07-27 11:45:29 -0700 | [diff] [blame] | 2236 | 			continue; | 
 | 2237 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 2239 |  | 
| Hal Rosenstock | 03b61ad | 2005-07-27 11:45:32 -0700 | [diff] [blame] | 2240 | 		if (mad_send_wr->status == IB_WC_SUCCESS) | 
 | 2241 | 			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | 
 | 2242 | 		else | 
 | 2243 | 			mad_send_wc.status = mad_send_wr->status; | 
| Sean Hefty | 34816ad | 2005-10-25 10:51:39 -0700 | [diff] [blame] | 2244 | 		mad_send_wc.send_buf = &mad_send_wr->send_buf; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2245 | 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 
 | 2246 | 						   &mad_send_wc); | 
 | 2247 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 | 		atomic_dec(&mad_agent_priv->refcount); | 
 | 2249 | 		spin_lock_irqsave(&mad_agent_priv->lock, flags); | 
 | 2250 | 	} | 
 | 2251 | 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 
 | 2252 | } | 
 | 2253 |  | 
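/*
 * CQ completion callback: all completion processing is deferred to
 * the port's single-threaded workqueue.
 */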
| Hal Rosenstock | 5dd2ce1 | 2005-08-15 14:16:36 -0700 | [diff] [blame] | 2254 | static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2255 | { | 
 | 2256 | 	struct ib_mad_port_private *port_priv = cq->cq_context; | 
 | 2257 |  | 
 | 2258 | 	queue_work(port_priv->wq, &port_priv->work); | 
 | 2259 | } | 
 | 2260 |  | 
 | 2261 | /* | 
 | 2262 |  * Allocate receive MADs and post receive WRs for them | 
 | 2263 |  */ | 
 | 2264 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | 
 | 2265 | 				    struct ib_mad_private *mad) | 
 | 2266 | { | 
 | 2267 | 	unsigned long flags; | 
 | 2268 | 	int post, ret; | 
 | 2269 | 	struct ib_mad_private *mad_priv; | 
 | 2270 | 	struct ib_sge sg_list; | 
 | 2271 | 	struct ib_recv_wr recv_wr, *bad_recv_wr; | 
 | 2272 | 	struct ib_mad_queue *recv_queue = &qp_info->recv_queue; | 
 | 2273 |  | 
 | 2274 | 	/* Initialize common scatter list fields */ | 
 | 2275 | 	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; | 
 | 2276 | 	sg_list.lkey = (*qp_info->port_priv->mr).lkey; | 
 | 2277 |  | 
 | 2278 | 	/* Initialize common receive WR fields */ | 
 | 2279 | 	recv_wr.next = NULL; | 
 | 2280 | 	recv_wr.sg_list = &sg_list; | 
 | 2281 | 	recv_wr.num_sge = 1; | 
 | 2282 |  | 
 | 2283 | 	do { | 
 | 2284 | 		/* Allocate and map receive buffer */ | 
 | 2285 | 		if (mad) { | 
 | 2286 | 			mad_priv = mad; | 
 | 2287 | 			mad = NULL; | 
 | 2288 | 		} else { | 
 | 2289 | 			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); | 
 | 2290 | 			if (!mad_priv) { | 
 | 2291 | 				printk(KERN_ERR PFX "No memory for receive buffer\n"); | 
 | 2292 | 				ret = -ENOMEM; | 
 | 2293 | 				break; | 
 | 2294 | 			} | 
 | 2295 | 		} | 
 | 2296 | 		sg_list.addr = dma_map_single(qp_info->port_priv-> | 
 | 2297 | 						device->dma_device, | 
 | 2298 | 					&mad_priv->grh, | 
 | 2299 | 					sizeof *mad_priv - | 
 | 2300 | 						sizeof mad_priv->header, | 
 | 2301 | 					DMA_FROM_DEVICE); | 
 | 2302 | 		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); | 
 | 2303 | 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; | 
 | 2304 | 		mad_priv->header.mad_list.mad_queue = recv_queue; | 
 | 2305 |  | 
 | 2306 | 		/* Post receive WR */ | 
 | 2307 | 		spin_lock_irqsave(&recv_queue->lock, flags); | 
 | 2308 | 		post = (++recv_queue->count < recv_queue->max_active); | 
 | 2309 | 		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); | 
 | 2310 | 		spin_unlock_irqrestore(&recv_queue->lock, flags); | 
 | 2311 | 		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); | 
 | 2312 | 		if (ret) { | 
 | 2313 | 			spin_lock_irqsave(&recv_queue->lock, flags); | 
 | 2314 | 			list_del(&mad_priv->header.mad_list.list); | 
 | 2315 | 			recv_queue->count--; | 
 | 2316 | 			spin_unlock_irqrestore(&recv_queue->lock, flags); | 
 | 2317 | 			dma_unmap_single(qp_info->port_priv->device->dma_device, | 
 | 2318 | 					 pci_unmap_addr(&mad_priv->header, | 
 | 2319 | 							mapping), | 
 | 2320 | 					 sizeof *mad_priv - | 
 | 2321 | 					   sizeof mad_priv->header, | 
 | 2322 | 					 DMA_FROM_DEVICE); | 
 | 2323 | 			kmem_cache_free(ib_mad_cache, mad_priv); | 
 | 2324 | 			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); | 
 | 2325 | 			break; | 
 | 2326 | 		} | 
 | 2327 | 	} while (post); | 
 | 2328 |  | 
 | 2329 | 	return ret; | 
 | 2330 | } | 
 | 2331 |  | 
 | 2332 | /* | 
 | 2333 |  * Return all the posted receive MADs | 
 | 2334 |  */ | 
 | 2335 | static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) | 
 | 2336 | { | 
 | 2337 | 	struct ib_mad_private_header *mad_priv_hdr; | 
 | 2338 | 	struct ib_mad_private *recv; | 
 | 2339 | 	struct ib_mad_list_head *mad_list; | 
 | 2340 |  | 
 | 2341 | 	while (!list_empty(&qp_info->recv_queue.list)) { | 
 | 2342 |  | 
 | 2343 | 		mad_list = list_entry(qp_info->recv_queue.list.next, | 
 | 2344 | 				      struct ib_mad_list_head, list); | 
 | 2345 | 		mad_priv_hdr = container_of(mad_list, | 
 | 2346 | 					    struct ib_mad_private_header, | 
 | 2347 | 					    mad_list); | 
 | 2348 | 		recv = container_of(mad_priv_hdr, struct ib_mad_private, | 
 | 2349 | 				    header); | 
 | 2350 |  | 
 | 2351 | 		/* Remove from posted receive MAD list */ | 
 | 2352 | 		list_del(&mad_list->list); | 
 | 2353 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2354 | 		dma_unmap_single(qp_info->port_priv->device->dma_device, | 
 | 2355 | 				 pci_unmap_addr(&recv->header, mapping), | 
 | 2356 | 				 sizeof(struct ib_mad_private) - | 
 | 2357 | 				 sizeof(struct ib_mad_private_header), | 
 | 2358 | 				 DMA_FROM_DEVICE); | 
 | 2359 | 		kmem_cache_free(ib_mad_cache, recv); | 
 | 2360 | 	} | 
 | 2361 |  | 
 | 2362 | 	qp_info->recv_queue.count = 0; | 
 | 2363 | } | 
 | 2364 |  | 
 | 2365 | /* | 
 | 2366 |  * Start the port | 
 | 2367 |  */ | 
 | 2368 | static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | 
 | 2369 | { | 
 | 2370 | 	int ret, i; | 
 | 2371 | 	struct ib_qp_attr *attr; | 
 | 2372 | 	struct ib_qp *qp; | 
 | 2373 |  | 
 | 2374 | 	attr = kmalloc(sizeof *attr, GFP_KERNEL); | 
 | 2375 | 	if (!attr) { | 
 | 2376 | 		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); | 
 | 2377 | 		return -ENOMEM; | 
 | 2378 | 	} | 
 | 2379 |  | 
 | 2380 | 	for (i = 0; i < IB_MAD_QPS_CORE; i++) { | 
 | 2381 | 		qp = port_priv->qp_info[i].qp; | 
 | 2382 | 		/* | 
 | 2383 | 		 * PKey index for QP1 is irrelevant but | 
 | 2384 | 		 * one is needed for the Reset to Init transition | 
 | 2385 | 		 */ | 
 | 2386 | 		attr->qp_state = IB_QPS_INIT; | 
 | 2387 | 		attr->pkey_index = 0; | 
 | 2388 | 		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; | 
 | 2389 | 		ret = ib_modify_qp(qp, attr, IB_QP_STATE | | 
 | 2390 | 					     IB_QP_PKEY_INDEX | IB_QP_QKEY); | 
 | 2391 | 		if (ret) { | 
 | 2392 | 			printk(KERN_ERR PFX "Couldn't change QP%d state to " | 
 | 2393 | 			       "INIT: %d\n", i, ret); | 
 | 2394 | 			goto out; | 
 | 2395 | 		} | 
 | 2396 |  | 
 | 2397 | 		attr->qp_state = IB_QPS_RTR; | 
 | 2398 | 		ret = ib_modify_qp(qp, attr, IB_QP_STATE); | 
 | 2399 | 		if (ret) { | 
 | 2400 | 			printk(KERN_ERR PFX "Couldn't change QP%d state to " | 
 | 2401 | 			       "RTR: %d\n", i, ret); | 
 | 2402 | 			goto out; | 
 | 2403 | 		} | 
 | 2404 |  | 
 | 2405 | 		attr->qp_state = IB_QPS_RTS; | 
 | 2406 | 		attr->sq_psn = IB_MAD_SEND_Q_PSN; | 
 | 2407 | 		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); | 
 | 2408 | 		if (ret) { | 
 | 2409 | 			printk(KERN_ERR PFX "Couldn't change QP%d state to " | 
 | 2410 | 			       "RTS: %d\n", i, ret); | 
 | 2411 | 			goto out; | 
 | 2412 | 		} | 
 | 2413 | 	} | 
 | 2414 |  | 
 | 2415 | 	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | 
 | 2416 | 	if (ret) { | 
 | 2417 | 		printk(KERN_ERR PFX "Failed to request completion " | 
 | 2418 | 		       "notification: %d\n", ret); | 
 | 2419 | 		goto out; | 
 | 2420 | 	} | 
 | 2421 |  | 
 | 2422 | 	for (i = 0; i < IB_MAD_QPS_CORE; i++) { | 
 | 2423 | 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); | 
 | 2424 | 		if (ret) { | 
 | 2425 | 			printk(KERN_ERR PFX "Couldn't post receive WRs\n"); | 
 | 2426 | 			goto out; | 
 | 2427 | 		} | 
 | 2428 | 	} | 
 | 2429 | out: | 
 | 2430 | 	kfree(attr); | 
 | 2431 | 	return ret; | 
 | 2432 | } | 
 | 2433 |  | 
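/*
 * Async event handler for the MAD QPs: a fatal QP error is only
 * logged; no recovery of the special QPs is attempted here.
 */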
 | 2434 | static void qp_event_handler(struct ib_event *event, void *qp_context) | 
 | 2435 | { | 
 | 2436 | 	struct ib_mad_qp_info	*qp_info = qp_context; | 
 | 2437 |  | 
 | 2438 | 	/* It's worse than that! He's dead, Jim! */ | 
 | 2439 | 	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n", | 
 | 2440 | 		event->event, qp_info->qp->qp_num); | 
 | 2441 | } | 
 | 2442 |  | 
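/* Initialize the tracking structure for one send or receive queue */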
 | 2443 | static void init_mad_queue(struct ib_mad_qp_info *qp_info, | 
 | 2444 | 			   struct ib_mad_queue *mad_queue) | 
 | 2445 | { | 
 | 2446 | 	mad_queue->qp_info = qp_info; | 
 | 2447 | 	mad_queue->count = 0; | 
 | 2448 | 	spin_lock_init(&mad_queue->lock); | 
 | 2449 | 	INIT_LIST_HEAD(&mad_queue->list); | 
 | 2450 | } | 
 | 2451 |  | 
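/* Initialize the per-QP bookkeeping: send/recv queues and snoop state */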
 | 2452 | static void init_mad_qp(struct ib_mad_port_private *port_priv, | 
 | 2453 | 			struct ib_mad_qp_info *qp_info) | 
 | 2454 | { | 
 | 2455 | 	qp_info->port_priv = port_priv; | 
 | 2456 | 	init_mad_queue(qp_info, &qp_info->send_queue); | 
 | 2457 | 	init_mad_queue(qp_info, &qp_info->recv_queue); | 
 | 2458 | 	INIT_LIST_HEAD(&qp_info->overflow_list); | 
 | 2459 | 	spin_lock_init(&qp_info->snoop_lock); | 
 | 2460 | 	qp_info->snoop_table = NULL; | 
 | 2461 | 	qp_info->snoop_table_size = 0; | 
 | 2462 | 	atomic_set(&qp_info->snoop_count, 0); | 
 | 2463 | } | 
 | 2464 |  | 
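/*
 * Create a special QP (SMI or GSI) on the port's shared CQ and PD,
 * and record the default send and receive queue depths.
 */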
 | 2465 | static int create_mad_qp(struct ib_mad_qp_info *qp_info, | 
 | 2466 | 			 enum ib_qp_type qp_type) | 
 | 2467 | { | 
 | 2468 | 	struct ib_qp_init_attr	qp_init_attr; | 
 | 2469 | 	int ret; | 
 | 2470 |  | 
 | 2471 | 	memset(&qp_init_attr, 0, sizeof qp_init_attr); | 
 | 2472 | 	qp_init_attr.send_cq = qp_info->port_priv->cq; | 
 | 2473 | 	qp_init_attr.recv_cq = qp_info->port_priv->cq; | 
 | 2474 | 	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; | 
 | 2475 | 	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE; | 
 | 2476 | 	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE; | 
 | 2477 | 	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; | 
 | 2478 | 	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; | 
 | 2479 | 	qp_init_attr.qp_type = qp_type; | 
 | 2480 | 	qp_init_attr.port_num = qp_info->port_priv->port_num; | 
 | 2481 | 	qp_init_attr.qp_context = qp_info; | 
 | 2482 | 	qp_init_attr.event_handler = qp_event_handler; | 
 | 2483 | 	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); | 
 | 2484 | 	if (IS_ERR(qp_info->qp)) { | 
 | 2485 | 		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n", | 
 | 2486 | 		       get_spl_qp_index(qp_type)); | 
 | 2487 | 		ret = PTR_ERR(qp_info->qp); | 
 | 2488 | 		goto error; | 
 | 2489 | 	} | 
 | 2490 | 	/* Use minimum queue sizes unless the CQ is resized */ | 
 | 2491 | 	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE; | 
 | 2492 | 	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE; | 
 | 2493 | 	return 0; | 
 | 2494 |  | 
 | 2495 | error: | 
 | 2496 | 	return ret; | 
 | 2497 | } | 
 | 2498 |  | 
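/* Destroy a MAD QP and free any snoop table attached to it */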
 | 2499 | static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) | 
 | 2500 | { | 
 | 2501 | 	ib_destroy_qp(qp_info->qp); | 
| Jesper Juhl | 6044ec8 | 2005-11-07 01:01:32 -0800 | [diff] [blame] | 2502 | 	kfree(qp_info->snoop_table); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2503 | } | 
 | 2504 |  | 
 | 2505 | /* | 
 | 2506 |  * Open the port | 
 | 2507 |  * Create the QPs, PD, MR, and CQ, and start the port | 
 | 2508 |  */ | 
 | 2509 | static int ib_mad_port_open(struct ib_device *device, | 
 | 2510 | 			    int port_num) | 
 | 2511 | { | 
 | 2512 | 	int ret, cq_size; | 
 | 2513 | 	struct ib_mad_port_private *port_priv; | 
 | 2514 | 	unsigned long flags; | 
 | 2515 | 	char name[sizeof "ib_mad123"]; | 
 | 2516 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2517 | 	/* Create new device info */ | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 2518 | 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2519 | 	if (!port_priv) { | 
 | 2520 | 		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n"); | 
 | 2521 | 		return -ENOMEM; | 
 | 2522 | 	} | 
| Roland Dreier | de6eb66 | 2005-11-02 07:23:14 -0800 | [diff] [blame] | 2523 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2524 | 	port_priv->device = device; | 
 | 2525 | 	port_priv->port_num = port_num; | 
 | 2526 | 	spin_lock_init(&port_priv->reg_lock); | 
 | 2527 | 	INIT_LIST_HEAD(&port_priv->agent_list); | 
 | 2528 | 	init_mad_qp(port_priv, &port_priv->qp_info[0]); | 
 | 2529 | 	init_mad_qp(port_priv, &port_priv->qp_info[1]); | 
 | 2530 |  | 
 | 2531 | 	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; | 
 | 2532 | 	port_priv->cq = ib_create_cq(port_priv->device, | 
| Hal Rosenstock | 5dd2ce1 | 2005-08-15 14:16:36 -0700 | [diff] [blame] | 2533 | 				     ib_mad_thread_completion_handler, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2534 | 				     NULL, port_priv, cq_size); | 
 | 2535 | 	if (IS_ERR(port_priv->cq)) { | 
 | 2536 | 		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); | 
 | 2537 | 		ret = PTR_ERR(port_priv->cq); | 
 | 2538 | 		goto error3; | 
 | 2539 | 	} | 
 | 2540 |  | 
 | 2541 | 	port_priv->pd = ib_alloc_pd(device); | 
 | 2542 | 	if (IS_ERR(port_priv->pd)) { | 
 | 2543 | 		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n"); | 
 | 2544 | 		ret = PTR_ERR(port_priv->pd); | 
 | 2545 | 		goto error4; | 
 | 2546 | 	} | 
 | 2547 |  | 
 | 2548 | 	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); | 
 | 2549 | 	if (IS_ERR(port_priv->mr)) { | 
 | 2550 | 		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n"); | 
 | 2551 | 		ret = PTR_ERR(port_priv->mr); | 
 | 2552 | 		goto error5; | 
 | 2553 | 	} | 
 | 2554 |  | 
 | 2555 | 	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); | 
 | 2556 | 	if (ret) | 
 | 2557 | 		goto error6; | 
 | 2558 | 	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); | 
 | 2559 | 	if (ret) | 
 | 2560 | 		goto error7; | 
 | 2561 |  | 
 | 2562 | 	snprintf(name, sizeof name, "ib_mad%d", port_num); | 
 | 2563 | 	port_priv->wq = create_singlethread_workqueue(name); | 
 | 2564 | 	if (!port_priv->wq) { | 
 | 2565 | 		ret = -ENOMEM; | 
 | 2566 | 		goto error8; | 
 | 2567 | 	} | 
 | 2568 | 	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); | 
 | 2569 |  | 
 | 2570 | 	ret = ib_mad_port_start(port_priv); | 
 | 2571 | 	if (ret) { | 
 | 2572 | 		printk(KERN_ERR PFX "Couldn't start port\n"); | 
 | 2573 | 		goto error9; | 
 | 2574 | 	} | 
 | 2575 |  | 
 | 2576 | 	spin_lock_irqsave(&ib_mad_port_list_lock, flags); | 
 | 2577 | 	list_add_tail(&port_priv->port_list, &ib_mad_port_list); | 
 | 2578 | 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | 
 | 2579 | 	return 0; | 
 | 2580 |  | 
 | 2581 | error9: | 
 | 2582 | 	destroy_workqueue(port_priv->wq); | 
 | 2583 | error8: | 
 | 2584 | 	destroy_mad_qp(&port_priv->qp_info[1]); | 
 | 2585 | error7: | 
 | 2586 | 	destroy_mad_qp(&port_priv->qp_info[0]); | 
 | 2587 | error6: | 
 | 2588 | 	ib_dereg_mr(port_priv->mr); | 
 | 2589 | error5: | 
 | 2590 | 	ib_dealloc_pd(port_priv->pd); | 
 | 2591 | error4: | 
 | 2592 | 	ib_destroy_cq(port_priv->cq); | 
 | 2593 | 	cleanup_recv_queue(&port_priv->qp_info[1]); | 
 | 2594 | 	cleanup_recv_queue(&port_priv->qp_info[0]); | 
 | 2595 | error3: | 
 | 2596 | 	kfree(port_priv); | 
 | 2597 |  | 
 | 2598 | 	return ret; | 
 | 2599 | } | 
 | 2600 |  | 
 | 2601 | /* | 
 | 2602 |  * Close the port | 
 | 2603 |  * Free the port resources (CQ, MR, PD, QPs) and remove | 
 | 2604 |  * the port's info structure | 
 | 2605 |  */ | 
 | 2606 | static int ib_mad_port_close(struct ib_device *device, int port_num) | 
 | 2607 | { | 
 | 2608 | 	struct ib_mad_port_private *port_priv; | 
 | 2609 | 	unsigned long flags; | 
 | 2610 |  | 
 | 2611 | 	spin_lock_irqsave(&ib_mad_port_list_lock, flags); | 
 | 2612 | 	port_priv = __ib_get_mad_port(device, port_num); | 
 | 2613 | 	if (port_priv == NULL) { | 
 | 2614 | 		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | 
 | 2615 | 		printk(KERN_ERR PFX "Port %d not found\n", port_num); | 
 | 2616 | 		return -ENODEV; | 
 | 2617 | 	} | 
 | 2618 | 	list_del(&port_priv->port_list); | 
 | 2619 | 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | 
 | 2620 |  | 
 | 2621 | 	/* Stop processing completions. */ | 
 | 2622 | 	flush_workqueue(port_priv->wq); | 
 | 2623 | 	destroy_workqueue(port_priv->wq); | 
 | 2624 | 	destroy_mad_qp(&port_priv->qp_info[1]); | 
 | 2625 | 	destroy_mad_qp(&port_priv->qp_info[0]); | 
 | 2626 | 	ib_dereg_mr(port_priv->mr); | 
 | 2627 | 	ib_dealloc_pd(port_priv->pd); | 
 | 2628 | 	ib_destroy_cq(port_priv->cq); | 
 | 2629 | 	cleanup_recv_queue(&port_priv->qp_info[1]); | 
 | 2630 | 	cleanup_recv_queue(&port_priv->qp_info[0]); | 
 | 2631 | 	/* XXX: Handle deallocation of MAD registration tables */ | 
 | 2632 |  | 
 | 2633 | 	kfree(port_priv); | 
 | 2634 |  | 
 | 2635 | 	return 0; | 
 | 2636 | } | 
 | 2637 |  | 
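/*
 * Client "add" callback: bring up the MAD and agent services on each
 * port of the device (port 0 only for a switch), tearing down any
 * ports already opened if one of them fails.
 */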
 | 2638 | static void ib_mad_init_device(struct ib_device *device) | 
 | 2639 | { | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2640 | 	int start, end, i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2641 |  | 
 | 2642 | 	if (device->node_type == IB_NODE_SWITCH) { | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2643 | 		start = 0; | 
 | 2644 | 		end   = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2645 | 	} else { | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2646 | 		start = 1; | 
 | 2647 | 		end   = device->phys_port_cnt; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2648 | 	} | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2649 |  | 
 | 2650 | 	for (i = start; i <= end; i++) { | 
 | 2651 | 		if (ib_mad_port_open(device, i)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2652 | 			printk(KERN_ERR PFX "Couldn't open %s port %d\n", | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2653 | 			       device->name, i); | 
 | 2654 | 			goto error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2655 | 		} | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2656 | 		if (ib_agent_port_open(device, i)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2657 | 			printk(KERN_ERR PFX "Couldn't open %s port %d " | 
 | 2658 | 			       "for agents\n", | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2659 | 			       device->name, i); | 
 | 2660 | 			goto error_agent; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2661 | 		} | 
 | 2662 | 	} | 
| Hal Rosenstock | f68bcc2 | 2005-07-27 11:45:27 -0700 | [diff] [blame] | 2663 | 	return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2664 |  | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2665 | error_agent: | 
 | 2666 | 	if (ib_mad_port_close(device, i)) | 
 | 2667 | 		printk(KERN_ERR PFX "Couldn't close %s port %d\n", | 
 | 2668 | 		       device->name, i); | 
 | 2669 |  | 
 | 2670 | error: | 
 | 2671 | 	i--; | 
 | 2672 |  | 
 | 2673 | 	while (i >= start) { | 
 | 2674 | 		if (ib_agent_port_close(device, i)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 | 			printk(KERN_ERR PFX "Couldn't close %s port %d " | 
 | 2676 | 			       "for agents\n", | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2677 | 			       device->name, i); | 
 | 2678 | 		if (ib_mad_port_close(device, i)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2679 | 			printk(KERN_ERR PFX "Couldn't close %s port %d\n", | 
| Roland Dreier | 4ab6fb7 | 2005-10-06 13:28:16 -0700 | [diff] [blame] | 2680 | 			       device->name, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2681 | 		i--; | 
 | 2682 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2683 | } | 
 | 2684 |  | 
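/*
 * Client "remove" callback: close the agent and MAD services on each
 * port that was opened by ib_mad_init_device().
 */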
 | 2685 | static void ib_mad_remove_device(struct ib_device *device) | 
 | 2686 | { | 
| Hal Rosenstock | f68bcc2 | 2005-07-27 11:45:27 -0700 | [diff] [blame] | 2687 | 	int i, num_ports, cur_port; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2688 |  | 
 | 2689 | 	if (device->node_type == IB_NODE_SWITCH) { | 
 | 2690 | 		num_ports = 1; | 
 | 2691 | 		cur_port = 0; | 
 | 2692 | 	} else { | 
 | 2693 | 		num_ports = device->phys_port_cnt; | 
 | 2694 | 		cur_port = 1; | 
 | 2695 | 	} | 
 | 2696 | 	for (i = 0; i < num_ports; i++, cur_port++) { | 
| Hal Rosenstock | f68bcc2 | 2005-07-27 11:45:27 -0700 | [diff] [blame] | 2697 | 		if (ib_agent_port_close(device, cur_port)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2698 | 			printk(KERN_ERR PFX "Couldn't close %s port %d " | 
 | 2699 | 			       "for agents\n", | 
 | 2700 | 			       device->name, cur_port); | 
| Hal Rosenstock | f68bcc2 | 2005-07-27 11:45:27 -0700 | [diff] [blame] | 2701 | 		if (ib_mad_port_close(device, cur_port)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2702 | 			printk(KERN_ERR PFX "Couldn't close %s port %d\n", | 
 | 2703 | 			       device->name, cur_port); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2704 | 	} | 
 | 2705 | } | 
 | 2706 |  | 
 | 2707 | static struct ib_client mad_client = { | 
 | 2708 | 	.name   = "mad", | 
 | 2709 | 	.add = ib_mad_init_device, | 
 | 2710 | 	.remove = ib_mad_remove_device | 
 | 2711 | }; | 
 | 2712 |  | 
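/*
 * Module initialization: create the slab cache used for MAD buffers
 * and register with the IB core so ports are opened as devices are
 * added.
 */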
 | 2713 | static int __init ib_mad_init_module(void) | 
 | 2714 | { | 
 | 2715 | 	int ret; | 
 | 2716 |  | 
 | 2717 | 	spin_lock_init(&ib_mad_port_list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2718 |  | 
 | 2719 | 	ib_mad_cache = kmem_cache_create("ib_mad", | 
 | 2720 | 					 sizeof(struct ib_mad_private), | 
 | 2721 | 					 0, | 
 | 2722 | 					 SLAB_HWCACHE_ALIGN, | 
 | 2723 | 					 NULL, | 
 | 2724 | 					 NULL); | 
 | 2725 | 	if (!ib_mad_cache) { | 
 | 2726 | 		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); | 
 | 2727 | 		ret = -ENOMEM; | 
 | 2728 | 		goto error1; | 
 | 2729 | 	} | 
 | 2730 |  | 
 | 2731 | 	INIT_LIST_HEAD(&ib_mad_port_list); | 
 | 2732 |  | 
 | 2733 | 	if (ib_register_client(&mad_client)) { | 
 | 2734 | 		printk(KERN_ERR PFX "Couldn't register ib_mad client\n"); | 
 | 2735 | 		ret = -EINVAL; | 
 | 2736 | 		goto error2; | 
 | 2737 | 	} | 
 | 2738 |  | 
 | 2739 | 	return 0; | 
 | 2740 |  | 
 | 2741 | error2: | 
 | 2742 | 	kmem_cache_destroy(ib_mad_cache); | 
 | 2743 | error1: | 
 | 2744 | 	return ret; | 
 | 2745 | } | 
 | 2746 |  | 
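/*
 * Module cleanup: unregister the client (which closes all open ports)
 * and destroy the MAD slab cache.
 */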
 | 2747 | static void __exit ib_mad_cleanup_module(void) | 
 | 2748 | { | 
 | 2749 | 	ib_unregister_client(&mad_client); | 
 | 2750 |  | 
 | 2751 | 	if (kmem_cache_destroy(ib_mad_cache)) { | 
 | 2752 | 		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n"); | 
 | 2753 | 	} | 
 | 2754 | } | 
 | 2755 |  | 
 | 2756 | module_init(ib_mad_init_module); | 
 | 2757 | module_exit(ib_mad_cleanup_module); | 
| Hal Rosenstock | fa619a7 | 2005-07-27 11:45:37 -0700 | [diff] [blame] | 2758 |  |