/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "rds.h"
#include "ib.h"

static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;

module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
module_param(fmr_message_size, int, 0444);
MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");

/*
 * We have a clumsy combination of RCU and an rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);

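/*
 * Kick connections that are not yet bound to a device; called from
 * rds_ib_add_one() so pending connections can retry on the new device.
 */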
static void rds_ib_nodev_connect(void)
{
        struct rds_ib_connection *ic;

        spin_lock(&ib_nodev_conns_lock);
        list_for_each_entry(ic, &ib_nodev_conns, ib_node)
                rds_conn_connect_if_down(ic->conn);
        spin_unlock(&ib_nodev_conns_lock);
}

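/*
 * Drop every connection using this device; called from rds_ib_remove_one()
 * so nothing is still using the device while it is torn down.
 */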
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_connection *ic;
        unsigned long flags;

        spin_lock_irqsave(&rds_ibdev->spinlock, flags);
        list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
                rds_conn_drop(ic->conn);
        spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}

/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
        struct rds_ib_ipaddr *i_ipaddr, *i_next;
        struct rds_ib_device *rds_ibdev = container_of(work,
                                        struct rds_ib_device, free_work);

        if (rds_ibdev->mr_pool)
                rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
        if (rds_ibdev->mr)
                ib_dereg_mr(rds_ibdev->mr);
        if (rds_ibdev->pd)
                ib_dealloc_pd(rds_ibdev->pd);

        list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
                list_del(&i_ipaddr->list);
                kfree(i_ipaddr);
        }

        kfree(rds_ibdev);
}

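/*
 * Drop a reference; the final put defers the actual free to rds_ib_dev_free()
 * on the rds_wq workqueue, since references can be dropped from interrupt
 * context.
 */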
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
        BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
        if (atomic_dec_and_test(&rds_ibdev->refcount))
                queue_work(rds_wq, &rds_ibdev->free_work);
}

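/*
 * ->add callback for the rds_ib client: allocate and initialize a
 * rds_ib_device for the new HCA (PD, DMA MR, FMR pool), publish it on
 * rds_ib_devices and as the device's client data, then retry any
 * connections that were waiting for a device.
 */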
static void rds_ib_add_one(struct ib_device *device)
{
        struct rds_ib_device *rds_ibdev;
        struct ib_device_attr *dev_attr;

        /* Only handle IB (no iWARP) devices */
        if (device->node_type != RDMA_NODE_IB_CA)
                return;

        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
        if (!dev_attr)
                return;

        if (ib_query_device(device, dev_attr)) {
                rdsdebug("Query device failed for %s\n", device->name);
                goto free_attr;
        }

        rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
                                 ibdev_to_node(device));
        if (!rds_ibdev)
                goto free_attr;

        spin_lock_init(&rds_ibdev->spinlock);
        atomic_set(&rds_ibdev->refcount, 1);
        INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

        rds_ibdev->max_wrs = dev_attr->max_qp_wr;
        rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);

        rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr ?: 32;
        rds_ibdev->max_fmrs = dev_attr->max_fmr ?
                        min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
                        fmr_pool_size;

        rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
        rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;

        rds_ibdev->dev = device;
        rds_ibdev->pd = ib_alloc_pd(device);
        if (IS_ERR(rds_ibdev->pd)) {
                rds_ibdev->pd = NULL;
                goto put_dev;
        }

        rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(rds_ibdev->mr)) {
                rds_ibdev->mr = NULL;
                goto put_dev;
        }

        rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
        if (IS_ERR(rds_ibdev->mr_pool)) {
                rds_ibdev->mr_pool = NULL;
                goto put_dev;
        }

        INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
        INIT_LIST_HEAD(&rds_ibdev->conn_list);

        down_write(&rds_ib_devices_lock);
        list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
        up_write(&rds_ib_devices_lock);
        atomic_inc(&rds_ibdev->refcount);

        ib_set_client_data(device, &rds_ib_client, rds_ibdev);
        atomic_inc(&rds_ibdev->refcount);

        rds_ib_nodev_connect();

put_dev:
        rds_ib_dev_put(rds_ibdev);
free_attr:
        kfree(dev_attr);
}

/*
 * New connections use this to find the device to associate with the
 * connection. It's not in the fast path so we're not concerned about the
 * performance of the IB call. (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period. The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if it's arriving on a
 * device that is in the process of being removed.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
        struct rds_ib_device *rds_ibdev;

        rcu_read_lock();
        rds_ibdev = ib_get_client_data(device, &rds_ib_client);
        if (rds_ibdev)
                atomic_inc(&rds_ibdev->refcount);
        rcu_read_unlock();
        return rds_ibdev;
}

/*
 * The IB stack is letting us know that a device is going away. This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the pci function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device)
{
        struct rds_ib_device *rds_ibdev;

        rds_ibdev = ib_get_client_data(device, &rds_ib_client);
        if (!rds_ibdev)
                return;

        rds_ib_dev_shutdown(rds_ibdev);

        /* stop connection attempts from getting a reference to this device. */
        ib_set_client_data(device, &rds_ib_client, NULL);

        down_write(&rds_ib_devices_lock);
        list_del_rcu(&rds_ibdev->list);
        up_write(&rds_ib_devices_lock);

        /*
         * This synchronize_rcu() waits for readers of both the ib
         * client data and the devices list to finish before we drop
         * both of those references.
         */
        synchronize_rcu();
        rds_ib_dev_put(rds_ibdev);
        rds_ib_dev_put(rds_ibdev);
}

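/* The client the IB core calls back when HCAs are added or removed. */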
struct ib_client rds_ib_client = {
        .name   = "rds_ib",
        .add    = rds_ib_add_one,
        .remove = rds_ib_remove_one
};

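/*
 * Fill in one rds_info_rdma_connection entry for a connection; only
 * connections using the IB transport are reported.
 */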
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
                                    void *buffer)
{
        struct rds_info_rdma_connection *iinfo = buffer;
        struct rds_ib_connection *ic;

        /* We will only ever look at IB transports */
        if (conn->c_trans != &rds_ib_transport)
                return 0;

        iinfo->src_addr = conn->c_laddr;
        iinfo->dst_addr = conn->c_faddr;

        memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
        memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
                struct rdma_dev_addr *dev_addr;

                ic = conn->c_transport_data;
                dev_addr = &ic->i_cm_id->route.addr.dev_addr;

                rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
                rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);

                rds_ibdev = ic->rds_ibdev;
                iinfo->max_send_wr = ic->i_send_ring.w_nr;
                iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
                iinfo->max_send_sge = rds_ibdev->max_sge;
                rds_ib_get_mr_info(rds_ibdev, iinfo);
        }
        return 1;
}

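/*
 * Walk every RDS connection and emit the IB-specific info for the
 * RDS_INFO_IB_CONNECTIONS info request.
 */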
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
                           struct rds_info_iterator *iter,
                           struct rds_info_lengths *lens)
{
        rds_for_each_conn_info(sock, len, iter, lens,
                               rds_ib_conn_info_visitor,
                               sizeof(struct rds_info_rdma_connection));
}

/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible. Sending and
 * receiving should be device-agnostic. Transports would try to maintain
 * connections between peers who have messages queued. Userspace would be
 * allowed to influence which paths have priority. We could call userspace
 * asserting this policy "routing".
 */
static int rds_ib_laddr_check(__be32 addr)
{
        int ret;
        struct rdma_cm_id *cm_id;
        struct sockaddr_in sin;

        /* Create a CMA ID and try to bind it. This catches both
         * IB and iWARP capable NICs.
         */
        cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = addr;

        /* rdma_bind_addr will only succeed for IB & iWARP devices */
        ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
        /* due to this, we will claim to support iWARP devices unless we
           check node_type. */
        if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
                ret = -EADDRNOTAVAIL;

        rdsdebug("addr %pI4 ret %d node type %d\n",
                 &addr, ret,
                 cm_id->device ? cm_id->device->node_type : -1);

        rdma_destroy_id(cm_id);

        return ret;
}

static void rds_ib_unregister_client(void)
{
        ib_unregister_client(&rds_ib_client);
        /* wait for rds_ib_dev_free() to complete */
        flush_workqueue(rds_wq);
}

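/*
 * Tear down the IB transport: deregister the info source and the IB client,
 * drop the remaining device-less connections and unregister the transport.
 */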
void rds_ib_exit(void)
{
        rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
        rds_ib_unregister_client();
        rds_ib_destroy_nodev_conns();
        rds_ib_sysctl_exit();
        rds_ib_recv_exit();
        rds_trans_unregister(&rds_ib_transport);
}

struct rds_transport rds_ib_transport = {
        .laddr_check            = rds_ib_laddr_check,
        .xmit_complete          = rds_ib_xmit_complete,
        .xmit                   = rds_ib_xmit,
        .xmit_rdma              = rds_ib_xmit_rdma,
        .xmit_atomic            = rds_ib_xmit_atomic,
        .recv                   = rds_ib_recv,
        .conn_alloc             = rds_ib_conn_alloc,
        .conn_free              = rds_ib_conn_free,
        .conn_connect           = rds_ib_conn_connect,
        .conn_shutdown          = rds_ib_conn_shutdown,
        .inc_copy_to_user       = rds_ib_inc_copy_to_user,
        .inc_free               = rds_ib_inc_free,
        .cm_initiate_connect    = rds_ib_cm_initiate_connect,
        .cm_handle_connect      = rds_ib_cm_handle_connect,
        .cm_connect_complete    = rds_ib_cm_connect_complete,
        .stats_info_copy        = rds_ib_stats_info_copy,
        .exit                   = rds_ib_exit,
        .get_mr                 = rds_ib_get_mr,
        .sync_mr                = rds_ib_sync_mr,
        .free_mr                = rds_ib_free_mr,
        .flush_mrs              = rds_ib_flush_mrs,
        .t_owner                = THIS_MODULE,
        .t_name                 = "infiniband",
        .t_type                 = RDS_TRANS_IB
};

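/*
 * Bring up the IB transport: register the IB client, sysctls and receive
 * path, then register the transport and its info source. Errors unwind in
 * reverse order.
 */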
int rds_ib_init(void)
{
        int ret;

        INIT_LIST_HEAD(&rds_ib_devices);

        ret = ib_register_client(&rds_ib_client);
        if (ret)
                goto out;

        ret = rds_ib_sysctl_init();
        if (ret)
                goto out_ibreg;

        ret = rds_ib_recv_init();
        if (ret)
                goto out_sysctl;

        ret = rds_trans_register(&rds_ib_transport);
        if (ret)
                goto out_recv;

        rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);

        goto out;

out_recv:
        rds_ib_recv_exit();
out_sysctl:
        rds_ib_sysctl_exit();
out_ibreg:
        rds_ib_unregister_client();
out:
        return ret;
}

MODULE_LICENSE("GPL");