Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. |
| 3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. |
| 4 | * |
| 5 | * This software is available to you under a choice of one of two |
| 6 | * licenses. You may choose to be licensed under the terms of the GNU |
| 7 | * General Public License (GPL) Version 2, available from the file |
| 8 | * COPYING in the main directory of this source tree, or the |
| 9 | * OpenIB.org BSD license below: |
| 10 | * |
| 11 | * Redistribution and use in source and binary forms, with or |
| 12 | * without modification, are permitted provided that the following |
| 13 | * conditions are met: |
| 14 | * |
| 15 | * - Redistributions of source code must retain the above |
| 16 | * copyright notice, this list of conditions and the following |
| 17 | * disclaimer. |
| 18 | * |
| 19 | * - Redistributions in binary form must reproduce the above |
| 20 | * copyright notice, this list of conditions and the following |
| 21 | * disclaimer in the documentation and/or other materials |
| 22 | * provided with the distribution. |
| 23 | * |
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 31 | * SOFTWARE. |
| 32 | * |
| 33 | */ |
| 34 | |
| 35 | |
| 36 | #include <linux/module.h> |
| 37 | #include <linux/moduleparam.h> |
| 38 | #include <linux/pci.h> |
| 39 | #include <linux/netdevice.h> |
| 40 | #include <linux/etherdevice.h> |
| 41 | #include <linux/delay.h> |
| 42 | #include <linux/ethtool.h> |
| 43 | #include <linux/mii.h> |
| 44 | #include <linux/if_vlan.h> |
| 45 | #include <linux/crc32.h> |
| 46 | #include <linux/in.h> |
| 47 | #include <linux/ip.h> |
| 48 | #include <linux/tcp.h> |
| 49 | #include <linux/init.h> |
| 50 | #include <linux/dma-mapping.h> |
| 51 | #include <linux/mm.h> |
| 52 | #include <linux/inet.h> |
Al Viro | d7b2004 | 2006-09-23 16:44:16 +0100 | [diff] [blame] | 53 | #include <linux/vmalloc.h> |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 54 | |
| 55 | #include <linux/route.h> |
| 56 | |
| 57 | #include <asm/io.h> |
| 58 | #include <asm/irq.h> |
| 59 | #include <asm/byteorder.h> |
| 60 | #include <rdma/ib_smi.h> |
| 61 | #include "c2.h" |
| 62 | #include "c2_vq.h" |
| 63 | |
/* Device capabilities: fixed limits for the amso1100 adapter, reported
 * to the IB core via c2_rnic_query() (and C2_MAX_CQS also sizes the
 * qptr_array in c2_rnic_init()).
 */
#define C2_MIN_PAGESIZE  1024	/* smallest supported page size, bytes */

#define C2_MAX_MRS       32768	/* memory regions */
#define C2_MAX_QPS       16000	/* queue pairs */
#define C2_MAX_WQE_SZ    256	/* work queue element size, bytes */
#define C2_MAX_QP_WR     ((128*1024)/C2_MAX_WQE_SZ)	/* WRs per 128KB queue */
#define C2_MAX_SGES      4	/* scatter/gather entries per WR */
#define C2_MAX_SGE_RD    1	/* SGEs per RDMA-read WR */
#define C2_MAX_CQS       32768	/* completion queues */
#define C2_MAX_CQES      4096	/* entries per CQ */
#define C2_MAX_PDS       16384	/* protection domains */
| 76 | |
| 77 | /* |
| 78 | * Send the adapter INIT message to the amso1100 |
| 79 | */ |
| 80 | static int c2_adapter_init(struct c2_dev *c2dev) |
| 81 | { |
| 82 | struct c2wr_init_req wr; |
| 83 | int err; |
| 84 | |
| 85 | memset(&wr, 0, sizeof(wr)); |
| 86 | c2_wr_set_id(&wr, CCWR_INIT); |
| 87 | wr.hdr.context = 0; |
| 88 | wr.hint_count = cpu_to_be64(c2dev->hint_count_dma); |
| 89 | wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma); |
| 90 | wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma); |
| 91 | wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma); |
| 92 | wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma); |
| 93 | wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma); |
| 94 | |
| 95 | /* Post the init message */ |
| 96 | err = vq_send_wr(c2dev, (union c2wr *) & wr); |
| 97 | |
| 98 | return err; |
| 99 | } |
| 100 | |
/*
 * Send the adapter TERM message to the amso1100.
 */
static void c2_adapter_term(struct c2_dev *c2dev)
{
	struct c2wr_init_req wr;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_TERM);
	wr.hdr.context = 0;

	/* Post the TERM message; fire-and-forget, no reply is awaited */
	vq_send_wr(c2dev, (union c2wr *) & wr);
	c2dev->init = 0;	/* clear the init count set by c2_rnic_init() */

	return;
}
| 118 | |
| 119 | /* |
| 120 | * Query the adapter |
| 121 | */ |
| 122 | static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props) |
| 123 | { |
| 124 | struct c2_vq_req *vq_req; |
| 125 | struct c2wr_rnic_query_req wr; |
| 126 | struct c2wr_rnic_query_rep *reply; |
| 127 | int err; |
| 128 | |
| 129 | vq_req = vq_req_alloc(c2dev); |
| 130 | if (!vq_req) |
| 131 | return -ENOMEM; |
| 132 | |
| 133 | c2_wr_set_id(&wr, CCWR_RNIC_QUERY); |
| 134 | wr.hdr.context = (unsigned long) vq_req; |
| 135 | wr.rnic_handle = c2dev->adapter_handle; |
| 136 | |
| 137 | vq_req_get(c2dev, vq_req); |
| 138 | |
| 139 | err = vq_send_wr(c2dev, (union c2wr *) &wr); |
| 140 | if (err) { |
| 141 | vq_req_put(c2dev, vq_req); |
| 142 | goto bail1; |
| 143 | } |
| 144 | |
| 145 | err = vq_wait_for_reply(c2dev, vq_req); |
| 146 | if (err) |
| 147 | goto bail1; |
| 148 | |
| 149 | reply = |
| 150 | (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg); |
| 151 | if (!reply) |
| 152 | err = -ENOMEM; |
Adrian Bunk | fb7711e | 2006-10-10 14:26:02 -0700 | [diff] [blame] | 153 | else |
| 154 | err = c2_errno(reply); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 155 | if (err) |
| 156 | goto bail2; |
| 157 | |
| 158 | props->fw_ver = |
| 159 | ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | |
Jean Delvare | b26c791 | 2006-11-09 21:02:26 +0100 | [diff] [blame] | 160 | ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) | |
| 161 | (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 162 | memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6); |
| 163 | props->max_mr_size = 0xFFFFFFFF; |
| 164 | props->page_size_cap = ~(C2_MIN_PAGESIZE-1); |
| 165 | props->vendor_id = be32_to_cpu(reply->vendor_id); |
| 166 | props->vendor_part_id = be32_to_cpu(reply->part_number); |
| 167 | props->hw_ver = be32_to_cpu(reply->hw_version); |
| 168 | props->max_qp = be32_to_cpu(reply->max_qps); |
| 169 | props->max_qp_wr = be32_to_cpu(reply->max_qp_depth); |
| 170 | props->device_cap_flags = c2dev->device_cap_flags; |
| 171 | props->max_sge = C2_MAX_SGES; |
| 172 | props->max_sge_rd = C2_MAX_SGE_RD; |
| 173 | props->max_cq = be32_to_cpu(reply->max_cqs); |
| 174 | props->max_cqe = be32_to_cpu(reply->max_cq_depth); |
| 175 | props->max_mr = be32_to_cpu(reply->max_mrs); |
| 176 | props->max_pd = be32_to_cpu(reply->max_pds); |
| 177 | props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird); |
| 178 | props->max_ee_rd_atom = 0; |
| 179 | props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird); |
| 180 | props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord); |
| 181 | props->max_ee_init_rd_atom = 0; |
| 182 | props->atomic_cap = IB_ATOMIC_NONE; |
| 183 | props->max_ee = 0; |
| 184 | props->max_rdd = 0; |
| 185 | props->max_mw = be32_to_cpu(reply->max_mws); |
| 186 | props->max_raw_ipv6_qp = 0; |
| 187 | props->max_raw_ethy_qp = 0; |
| 188 | props->max_mcast_grp = 0; |
| 189 | props->max_mcast_qp_attach = 0; |
| 190 | props->max_total_mcast_qp_attach = 0; |
| 191 | props->max_ah = 0; |
| 192 | props->max_fmr = 0; |
| 193 | props->max_map_per_fmr = 0; |
| 194 | props->max_srq = 0; |
| 195 | props->max_srq_wr = 0; |
| 196 | props->max_srq_sge = 0; |
| 197 | props->max_pkeys = 0; |
| 198 | props->local_ca_ack_delay = 0; |
| 199 | |
| 200 | bail2: |
| 201 | vq_repbuf_free(c2dev, reply); |
| 202 | |
| 203 | bail1: |
| 204 | vq_req_free(c2dev, vq_req); |
| 205 | return err; |
| 206 | } |
| 207 | |
| 208 | /* |
| 209 | * Add an IP address to the RNIC interface |
| 210 | */ |
| 211 | int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) |
| 212 | { |
| 213 | struct c2_vq_req *vq_req; |
| 214 | struct c2wr_rnic_setconfig_req *wr; |
| 215 | struct c2wr_rnic_setconfig_rep *reply; |
| 216 | struct c2_netaddr netaddr; |
| 217 | int err, len; |
| 218 | |
| 219 | vq_req = vq_req_alloc(c2dev); |
| 220 | if (!vq_req) |
| 221 | return -ENOMEM; |
| 222 | |
| 223 | len = sizeof(struct c2_netaddr); |
| 224 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); |
| 225 | if (!wr) { |
| 226 | err = -ENOMEM; |
| 227 | goto bail0; |
| 228 | } |
| 229 | |
| 230 | c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG); |
| 231 | wr->hdr.context = (unsigned long) vq_req; |
| 232 | wr->rnic_handle = c2dev->adapter_handle; |
| 233 | wr->option = cpu_to_be32(C2_CFG_ADD_ADDR); |
| 234 | |
| 235 | netaddr.ip_addr = inaddr; |
| 236 | netaddr.netmask = inmask; |
| 237 | netaddr.mtu = 0; |
| 238 | |
| 239 | memcpy(wr->data, &netaddr, len); |
| 240 | |
| 241 | vq_req_get(c2dev, vq_req); |
| 242 | |
| 243 | err = vq_send_wr(c2dev, (union c2wr *) wr); |
| 244 | if (err) { |
| 245 | vq_req_put(c2dev, vq_req); |
| 246 | goto bail1; |
| 247 | } |
| 248 | |
| 249 | err = vq_wait_for_reply(c2dev, vq_req); |
| 250 | if (err) |
| 251 | goto bail1; |
| 252 | |
| 253 | reply = |
| 254 | (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg); |
| 255 | if (!reply) { |
| 256 | err = -ENOMEM; |
| 257 | goto bail1; |
| 258 | } |
| 259 | |
| 260 | err = c2_errno(reply); |
| 261 | vq_repbuf_free(c2dev, reply); |
| 262 | |
| 263 | bail1: |
| 264 | kfree(wr); |
| 265 | bail0: |
| 266 | vq_req_free(c2dev, vq_req); |
| 267 | return err; |
| 268 | } |
| 269 | |
| 270 | /* |
| 271 | * Delete an IP address from the RNIC interface |
| 272 | */ |
| 273 | int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) |
| 274 | { |
| 275 | struct c2_vq_req *vq_req; |
| 276 | struct c2wr_rnic_setconfig_req *wr; |
| 277 | struct c2wr_rnic_setconfig_rep *reply; |
| 278 | struct c2_netaddr netaddr; |
| 279 | int err, len; |
| 280 | |
| 281 | vq_req = vq_req_alloc(c2dev); |
| 282 | if (!vq_req) |
| 283 | return -ENOMEM; |
| 284 | |
| 285 | len = sizeof(struct c2_netaddr); |
| 286 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); |
| 287 | if (!wr) { |
| 288 | err = -ENOMEM; |
| 289 | goto bail0; |
| 290 | } |
| 291 | |
| 292 | c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG); |
| 293 | wr->hdr.context = (unsigned long) vq_req; |
| 294 | wr->rnic_handle = c2dev->adapter_handle; |
| 295 | wr->option = cpu_to_be32(C2_CFG_DEL_ADDR); |
| 296 | |
| 297 | netaddr.ip_addr = inaddr; |
| 298 | netaddr.netmask = inmask; |
| 299 | netaddr.mtu = 0; |
| 300 | |
| 301 | memcpy(wr->data, &netaddr, len); |
| 302 | |
| 303 | vq_req_get(c2dev, vq_req); |
| 304 | |
| 305 | err = vq_send_wr(c2dev, (union c2wr *) wr); |
| 306 | if (err) { |
| 307 | vq_req_put(c2dev, vq_req); |
| 308 | goto bail1; |
| 309 | } |
| 310 | |
| 311 | err = vq_wait_for_reply(c2dev, vq_req); |
| 312 | if (err) |
| 313 | goto bail1; |
| 314 | |
| 315 | reply = |
| 316 | (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg); |
| 317 | if (!reply) { |
| 318 | err = -ENOMEM; |
| 319 | goto bail1; |
| 320 | } |
| 321 | |
| 322 | err = c2_errno(reply); |
| 323 | vq_repbuf_free(c2dev, reply); |
| 324 | |
| 325 | bail1: |
| 326 | kfree(wr); |
| 327 | bail0: |
| 328 | vq_req_free(c2dev, vq_req); |
| 329 | return err; |
| 330 | } |
| 331 | |
| 332 | /* |
| 333 | * Open a single RNIC instance to use with all |
| 334 | * low level openib calls |
| 335 | */ |
| 336 | static int c2_rnic_open(struct c2_dev *c2dev) |
| 337 | { |
| 338 | struct c2_vq_req *vq_req; |
| 339 | union c2wr wr; |
| 340 | struct c2wr_rnic_open_rep *reply; |
| 341 | int err; |
| 342 | |
| 343 | vq_req = vq_req_alloc(c2dev); |
| 344 | if (vq_req == NULL) { |
| 345 | return -ENOMEM; |
| 346 | } |
| 347 | |
| 348 | memset(&wr, 0, sizeof(wr)); |
| 349 | c2_wr_set_id(&wr, CCWR_RNIC_OPEN); |
| 350 | wr.rnic_open.req.hdr.context = (unsigned long) (vq_req); |
| 351 | wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE); |
| 352 | wr.rnic_open.req.port_num = cpu_to_be16(0); |
| 353 | wr.rnic_open.req.user_context = (unsigned long) c2dev; |
| 354 | |
| 355 | vq_req_get(c2dev, vq_req); |
| 356 | |
| 357 | err = vq_send_wr(c2dev, &wr); |
| 358 | if (err) { |
| 359 | vq_req_put(c2dev, vq_req); |
| 360 | goto bail0; |
| 361 | } |
| 362 | |
| 363 | err = vq_wait_for_reply(c2dev, vq_req); |
| 364 | if (err) { |
| 365 | goto bail0; |
| 366 | } |
| 367 | |
| 368 | reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg); |
| 369 | if (!reply) { |
| 370 | err = -ENOMEM; |
| 371 | goto bail0; |
| 372 | } |
| 373 | |
| 374 | if ((err = c2_errno(reply)) != 0) { |
| 375 | goto bail1; |
| 376 | } |
| 377 | |
| 378 | c2dev->adapter_handle = reply->rnic_handle; |
| 379 | |
| 380 | bail1: |
| 381 | vq_repbuf_free(c2dev, reply); |
| 382 | bail0: |
| 383 | vq_req_free(c2dev, vq_req); |
| 384 | return err; |
| 385 | } |
| 386 | |
| 387 | /* |
| 388 | * Close the RNIC instance |
| 389 | */ |
| 390 | static int c2_rnic_close(struct c2_dev *c2dev) |
| 391 | { |
| 392 | struct c2_vq_req *vq_req; |
| 393 | union c2wr wr; |
| 394 | struct c2wr_rnic_close_rep *reply; |
| 395 | int err; |
| 396 | |
| 397 | vq_req = vq_req_alloc(c2dev); |
| 398 | if (vq_req == NULL) { |
| 399 | return -ENOMEM; |
| 400 | } |
| 401 | |
| 402 | memset(&wr, 0, sizeof(wr)); |
| 403 | c2_wr_set_id(&wr, CCWR_RNIC_CLOSE); |
| 404 | wr.rnic_close.req.hdr.context = (unsigned long) vq_req; |
| 405 | wr.rnic_close.req.rnic_handle = c2dev->adapter_handle; |
| 406 | |
| 407 | vq_req_get(c2dev, vq_req); |
| 408 | |
| 409 | err = vq_send_wr(c2dev, &wr); |
| 410 | if (err) { |
| 411 | vq_req_put(c2dev, vq_req); |
| 412 | goto bail0; |
| 413 | } |
| 414 | |
| 415 | err = vq_wait_for_reply(c2dev, vq_req); |
| 416 | if (err) { |
| 417 | goto bail0; |
| 418 | } |
| 419 | |
| 420 | reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg); |
| 421 | if (!reply) { |
| 422 | err = -ENOMEM; |
| 423 | goto bail0; |
| 424 | } |
| 425 | |
| 426 | if ((err = c2_errno(reply)) != 0) { |
| 427 | goto bail1; |
| 428 | } |
| 429 | |
| 430 | c2dev->adapter_handle = 0; |
| 431 | |
| 432 | bail1: |
| 433 | vq_repbuf_free(c2dev, reply); |
| 434 | bail0: |
| 435 | vq_req_free(c2dev, vq_req); |
| 436 | return err; |
| 437 | } |
| 438 | |
| 439 | /* |
| 440 | * Called by c2_probe to initialize the RNIC. This principally |
| 441 | * involves initalizing the various limits and resouce pools that |
| 442 | * comprise the RNIC instance. |
| 443 | */ |
Roland Dreier | 2966612 | 2006-11-29 15:33:07 -0800 | [diff] [blame] | 444 | int __devinit c2_rnic_init(struct c2_dev *c2dev) |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 445 | { |
| 446 | int err; |
| 447 | u32 qsize, msgsize; |
| 448 | void *q1_pages; |
| 449 | void *q2_pages; |
| 450 | void __iomem *mmio_regs; |
| 451 | |
| 452 | /* Device capabilities */ |
| 453 | c2dev->device_cap_flags = |
| 454 | (IB_DEVICE_RESIZE_MAX_WR | |
| 455 | IB_DEVICE_CURR_QP_STATE_MOD | |
| 456 | IB_DEVICE_SYS_IMAGE_GUID | |
| 457 | IB_DEVICE_ZERO_STAG | |
| 458 | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW); |
| 459 | |
| 460 | /* Allocate the qptr_array */ |
| 461 | c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *)); |
| 462 | if (!c2dev->qptr_array) { |
| 463 | return -ENOMEM; |
| 464 | } |
| 465 | |
| 466 | /* Inialize the qptr_array */ |
| 467 | memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *)); |
| 468 | c2dev->qptr_array[0] = (void *) &c2dev->req_vq; |
| 469 | c2dev->qptr_array[1] = (void *) &c2dev->rep_vq; |
| 470 | c2dev->qptr_array[2] = (void *) &c2dev->aeq; |
| 471 | |
| 472 | /* Initialize data structures */ |
| 473 | init_waitqueue_head(&c2dev->req_vq_wo); |
| 474 | spin_lock_init(&c2dev->vqlock); |
| 475 | spin_lock_init(&c2dev->lock); |
| 476 | |
| 477 | /* Allocate MQ shared pointer pool for kernel clients. User |
| 478 | * mode client pools are hung off the user context |
| 479 | */ |
| 480 | err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool); |
| 481 | if (err) { |
| 482 | goto bail0; |
| 483 | } |
| 484 | |
| 485 | /* Allocate shared pointers for Q0, Q1, and Q2 from |
| 486 | * the shared pointer pool. |
| 487 | */ |
| 488 | |
| 489 | c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, |
| 490 | &c2dev->hint_count_dma, |
| 491 | GFP_KERNEL); |
| 492 | c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, |
| 493 | &c2dev->req_vq.shared_dma, |
| 494 | GFP_KERNEL); |
| 495 | c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, |
| 496 | &c2dev->rep_vq.shared_dma, |
| 497 | GFP_KERNEL); |
| 498 | c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, |
| 499 | &c2dev->aeq.shared_dma, GFP_KERNEL); |
| 500 | if (!c2dev->hint_count || !c2dev->req_vq.shared || |
| 501 | !c2dev->rep_vq.shared || !c2dev->aeq.shared) { |
| 502 | err = -ENOMEM; |
| 503 | goto bail1; |
| 504 | } |
| 505 | |
| 506 | mmio_regs = c2dev->kva; |
| 507 | /* Initialize the Verbs Request Queue */ |
| 508 | c2_mq_req_init(&c2dev->req_vq, 0, |
| 509 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)), |
| 510 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)), |
| 511 | mmio_regs + |
| 512 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)), |
| 513 | mmio_regs + |
| 514 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)), |
| 515 | C2_MQ_ADAPTER_TARGET); |
| 516 | |
| 517 | /* Initialize the Verbs Reply Queue */ |
| 518 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); |
| 519 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); |
Steve Wise | 8de94ce | 2006-10-27 17:28:35 -0500 | [diff] [blame] | 520 | q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, |
| 521 | &c2dev->rep_vq.host_dma, GFP_KERNEL); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 522 | if (!q1_pages) { |
| 523 | err = -ENOMEM; |
| 524 | goto bail1; |
| 525 | } |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 526 | pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); |
| 527 | pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, |
Roland Dreier | 6edf602 | 2006-09-27 14:42:56 -0700 | [diff] [blame] | 528 | (unsigned long long) c2dev->rep_vq.host_dma); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 529 | c2_mq_rep_init(&c2dev->rep_vq, |
| 530 | 1, |
| 531 | qsize, |
| 532 | msgsize, |
| 533 | q1_pages, |
| 534 | mmio_regs + |
| 535 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)), |
| 536 | C2_MQ_HOST_TARGET); |
| 537 | |
| 538 | /* Initialize the Asynchronus Event Queue */ |
| 539 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); |
| 540 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); |
Steve Wise | 8de94ce | 2006-10-27 17:28:35 -0500 | [diff] [blame] | 541 | q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, |
| 542 | &c2dev->aeq.host_dma, GFP_KERNEL); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 543 | if (!q2_pages) { |
| 544 | err = -ENOMEM; |
| 545 | goto bail2; |
| 546 | } |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 547 | pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); |
Steve Wise | d7b748d | 2006-10-30 20:52:53 -0800 | [diff] [blame] | 548 | pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages, |
| 549 | (unsigned long long) c2dev->aeq.host_dma); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 550 | c2_mq_rep_init(&c2dev->aeq, |
| 551 | 2, |
| 552 | qsize, |
| 553 | msgsize, |
| 554 | q2_pages, |
| 555 | mmio_regs + |
| 556 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)), |
| 557 | C2_MQ_HOST_TARGET); |
| 558 | |
| 559 | /* Initialize the verbs request allocator */ |
| 560 | err = vq_init(c2dev); |
| 561 | if (err) |
| 562 | goto bail3; |
| 563 | |
| 564 | /* Enable interrupts on the adapter */ |
| 565 | writel(0, c2dev->regs + C2_IDIS); |
| 566 | |
| 567 | /* create the WR init message */ |
| 568 | err = c2_adapter_init(c2dev); |
| 569 | if (err) |
| 570 | goto bail4; |
| 571 | c2dev->init++; |
| 572 | |
| 573 | /* open an adapter instance */ |
| 574 | err = c2_rnic_open(c2dev); |
| 575 | if (err) |
| 576 | goto bail4; |
| 577 | |
| 578 | /* Initialize cached the adapter limits */ |
| 579 | if (c2_rnic_query(c2dev, &c2dev->props)) |
| 580 | goto bail5; |
| 581 | |
| 582 | /* Initialize the PD pool */ |
| 583 | err = c2_init_pd_table(c2dev); |
| 584 | if (err) |
| 585 | goto bail5; |
| 586 | |
| 587 | /* Initialize the QP pool */ |
| 588 | c2_init_qp_table(c2dev); |
| 589 | return 0; |
| 590 | |
| 591 | bail5: |
| 592 | c2_rnic_close(c2dev); |
| 593 | bail4: |
| 594 | vq_term(c2dev); |
| 595 | bail3: |
Steve Wise | 8de94ce | 2006-10-27 17:28:35 -0500 | [diff] [blame] | 596 | dma_free_coherent(&c2dev->pcidev->dev, |
| 597 | c2dev->aeq.q_size * c2dev->aeq.msg_size, |
| 598 | q2_pages, pci_unmap_addr(&c2dev->aeq, mapping)); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 599 | bail2: |
Steve Wise | 8de94ce | 2006-10-27 17:28:35 -0500 | [diff] [blame] | 600 | dma_free_coherent(&c2dev->pcidev->dev, |
| 601 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, |
| 602 | q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping)); |
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 603 | bail1: |
| 604 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); |
| 605 | bail0: |
| 606 | vfree(c2dev->qptr_array); |
| 607 | |
| 608 | return err; |
| 609 | } |
| 610 | |
/*
 * Called by c2_remove to cleanup the RNIC resources.
 *
 * Teardown order matters: the adapter instance is closed and TERMed
 * and interrupts masked before any host-side pools the adapter DMAs
 * into are freed.
 */
void __devexit c2_rnic_term(struct c2_dev *c2dev)
{

	/* Close the open adapter instance */
	c2_rnic_close(c2dev);

	/* Send the TERM message to the adapter */
	c2_adapter_term(c2dev);

	/* Disable interrupts on the adapter */
	writel(1, c2dev->regs + C2_IDIS);

	/* Free the QP pool */
	c2_cleanup_qp_table(c2dev);

	/* Free the PD pool */
	c2_cleanup_pd_table(c2dev);

	/* Free the verbs request allocator */
	vq_term(c2dev);

	/* Free the asynchronous event queue (Q2) */
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
			  c2dev->aeq.msg_pool.host,
			  pci_unmap_addr(&c2dev->aeq, mapping));

	/* Free the verbs reply queue (Q1) */
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
			  c2dev->rep_vq.msg_pool.host,
			  pci_unmap_addr(&c2dev->rep_vq, mapping));

	/* Free the MQ shared pointer pool */
	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);

	/* Free the qptr_array */
	vfree(c2dev->qptr_array);

	return;
}