/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __C2_H
#define __C2_H

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <asm/semaphore.h>

#include "c2_provider.h"
#include "c2_mq.h"
#include "c2_status.h"

#define DRV_NAME     "c2"
#define DRV_VERSION  "1.1"
#define PFX          DRV_NAME ": "

#define BAR_0                0
#define BAR_2                2
#define BAR_4                4

#define RX_BUF_SIZE         (1536 + 8)
#define ETH_JUMBO_MTU        9000
#define C2_MAGIC            "CEPHEUS"
#define C2_VERSION           4
#define C2_IVN              (18 & 0x7fffffff)

#define C2_REG0_SIZE        (16 * 1024)
#define C2_REG2_SIZE        (2 * 1024 * 1024)
#define C2_REG4_SIZE        (256 * 1024 * 1024)
#define C2_NUM_TX_DESC       341
#define C2_NUM_RX_DESC       256
#define C2_PCI_REGS_OFFSET  (0x10000)
#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
#define C2_RXP_HRXDQ_SIZE   (4096)
#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
#define C2_TXP_HTXDQ_SIZE   (4096)
#define C2_TX_TIMEOUT       (6*HZ)

/* CEPHEUS */
static const u8 c2_magic[] = {
	0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
};

enum adapter_pci_regs {
	C2_REGS_MAGIC = 0x0000,
	C2_REGS_VERS = 0x0008,
	C2_REGS_IVN = 0x000C,
	C2_REGS_PCI_WINSIZE = 0x0010,
	C2_REGS_Q0_QSIZE = 0x0014,
	C2_REGS_Q0_MSGSIZE = 0x0018,
	C2_REGS_Q0_POOLSTART = 0x001C,
	C2_REGS_Q0_SHARED = 0x0020,
	C2_REGS_Q1_QSIZE = 0x0024,
	C2_REGS_Q1_MSGSIZE = 0x0028,
	C2_REGS_Q1_SHARED = 0x0030,
	C2_REGS_Q2_QSIZE = 0x0034,
	C2_REGS_Q2_MSGSIZE = 0x0038,
	C2_REGS_Q2_SHARED = 0x0040,
	C2_REGS_ENADDR = 0x004C,
	C2_REGS_RDMA_ENADDR = 0x0054,
	C2_REGS_HRX_CUR = 0x006C,
};

struct c2_adapter_pci_regs {
	char reg_magic[8];
	u32 version;
	u32 ivn;
	u32 pci_window_size;
	u32 q0_q_size;
	u32 q0_msg_size;
	u32 q0_pool_start;
	u32 q0_shared;
	u32 q1_q_size;
	u32 q1_msg_size;
	u32 q1_pool_start;
	u32 q1_shared;
	u32 q2_q_size;
	u32 q2_msg_size;
	u32 q2_pool_start;
	u32 q2_shared;
	u32 log_start;
	u32 log_size;
	u8 host_enaddr[8];
	u8 rdma_enaddr[8];
	u32 crash_entry;
	u32 crash_ready[2];
	u32 fw_txd_cur;
	u32 fw_hrxd_cur;
	u32 fw_rxd_cur;
};

enum pci_regs {
	C2_HISR = 0x0000,
	C2_DISR = 0x0004,
	C2_HIMR = 0x0008,
	C2_DIMR = 0x000C,
	C2_NISR0 = 0x0010,
	C2_NISR1 = 0x0014,
	C2_NIMR0 = 0x0018,
	C2_NIMR1 = 0x001C,
	C2_IDIS = 0x0020,
};

enum {
	C2_PCI_HRX_INT = 1 << 8,
	C2_PCI_HTX_INT = 1 << 17,
	C2_PCI_HRX_QUI = 1 << 31,
};

/*
 * Cepheus registers in BAR0.
 */
struct c2_pci_regs {
	u32 hostisr;
	u32 dmaisr;
	u32 hostimr;
	u32 dmaimr;
	u32 netisr0;
	u32 netisr1;
	u32 netimr0;
	u32 netimr1;
	u32 int_disable;
};

/* TXP flags */
enum c2_txp_flags {
	TXP_HTXD_DONE = 0,
	TXP_HTXD_READY = 1 << 0,
	TXP_HTXD_UNINIT = 1 << 1,
};

/* RXP flags */
enum c2_rxp_flags {
	RXP_HRXD_UNINIT = 0,
	RXP_HRXD_READY = 1 << 0,
	RXP_HRXD_DONE = 1 << 1,
};

/* RXP status */
enum c2_rxp_status {
	RXP_HRXD_ZERO = 0,
	RXP_HRXD_OK = 1 << 0,
	RXP_HRXD_BUF_OV = 1 << 1,
};

/* TXP descriptor fields */
enum txp_desc {
	C2_TXP_FLAGS = 0x0000,
	C2_TXP_LEN = 0x0002,
	C2_TXP_ADDR = 0x0004,
};

/* RXP descriptor fields */
enum rxp_desc {
	C2_RXP_FLAGS = 0x0000,
	C2_RXP_STATUS = 0x0002,
	C2_RXP_COUNT = 0x0004,
	C2_RXP_LEN = 0x0006,
	C2_RXP_ADDR = 0x0008,
};

struct c2_txp_desc {
	u16 flags;
	u16 len;
	u64 addr;
} __attribute__ ((packed));

struct c2_rxp_desc {
	u16 flags;
	u16 status;
	u16 count;
	u16 len;
	u64 addr;
} __attribute__ ((packed));
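
/*
 * The byte offsets in enum txp_desc and enum rxp_desc above mirror the
 * packed layouts of these two structs, e.g. C2_RXP_ADDR == 0x0008 ==
 * offsetof(struct c2_rxp_desc, addr).  A build-time check along these
 * lines (a sketch, not part of the original driver) would catch any
 * drift between the two views:
 *
 *	BUILD_BUG_ON(C2_TXP_ADDR != offsetof(struct c2_txp_desc, addr));
 *	BUILD_BUG_ON(C2_RXP_LEN != offsetof(struct c2_rxp_desc, len));
 */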

struct c2_rxp_hdr {
	u16 flags;
	u16 status;
	u16 len;
	u16 rsvd;
} __attribute__ ((packed));

struct c2_tx_desc {
	u32 len;
	u32 status;
	dma_addr_t next_offset;
};

struct c2_rx_desc {
	u32 len;
	u32 status;
	dma_addr_t next_offset;
};

struct c2_alloc {
	u32 last;
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c2_array {
	struct {
		void **page;
		int used;
	} *page_list;
};

/*
 * The MQ shared pointer pool is organized as a linked list of
 * chunks.  Each chunk contains a linked list of free shared pointers
 * that can be allocated to a given user mode client.
 */
struct sp_chunk {
	struct sp_chunk *next;
	dma_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	u16 head;
	u16 shared_ptr[0];
};
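
/*
 * Illustrative sketch of how a shared pointer might be popped from a
 * chunk; the authoritative version is c2_alloc_mqsp() declared below.
 * The idea is that shared_ptr[] doubles as an index-linked free list,
 * with 'head' naming the first free slot:
 *
 *	u16 idx = chunk->head;
 *	if (idx != EMPTY_SENTINEL) {	(EMPTY_SENTINEL is a hypothetical name)
 *		chunk->head = chunk->shared_ptr[idx];
 *		return &chunk->shared_ptr[idx];
 *	}
 *	... otherwise walk chunk->next, growing the pool if needed ...
 */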

struct c2_pd_table {
	u32 last;
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c2_qp_table {
	struct idr idr;
	spinlock_t lock;
	int last;
};

struct c2_element {
	struct c2_element *next;
	void *ht_desc;		/* host descriptor */
	void __iomem *hw_desc;	/* hardware descriptor */
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;
};

struct c2_ring {
	struct c2_element *to_clean;
	struct c2_element *to_use;
	struct c2_element *start;
	unsigned long count;
};
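
/*
 * Usage sketch (illustrative only): the elements of a c2_ring are
 * assumed to be linked into a circle through ->next, so producing and
 * consuming reduce to advancing the two cursors:
 *
 *	elem = ring->to_use;			(claim a descriptor)
 *	... fill elem->ht_desc / elem->hw_desc ...
 *	ring->to_use = elem->next;
 *
 *	elem = ring->to_clean;			(complete a descriptor)
 *	... unmap elem->mapaddr, free elem->skb ...
 *	ring->to_clean = elem->next;
 */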

struct c2_dev {
	struct ib_device ibdev;
	void __iomem *regs;
	void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
	void __iomem *mmio_rxp_ring;
	spinlock_t lock;
	struct pci_dev *pcidev;
	struct net_device *netdev;
	struct net_device *pseudo_netdev;
	unsigned int cur_tx;
	unsigned int cur_rx;
	u32 adapter_handle;
	int device_cap_flags;
	void __iomem *kva;	/* KVA device memory */
	unsigned long pa;	/* PA device memory */
	void **qptr_array;

	struct kmem_cache *host_msg_cache;

	struct list_head cca_link;		/* adapter list */
	struct list_head eh_wakeup_list;	/* event wakeup list */
	wait_queue_head_t req_vq_wo;

	/* Cached RNIC properties */
	struct ib_device_attr props;

	struct c2_pd_table pd_table;
	struct c2_qp_table qp_table;
	int ports;		/* num of GigE ports */
	int devnum;
	spinlock_t vqlock;	/* sync vbs req MQ */

	/* Verbs Queues */
	struct c2_mq req_vq;	/* Verbs Request MQ */
	struct c2_mq rep_vq;	/* Verbs Reply MQ */
	struct c2_mq aeq;	/* Async Events MQ */

	/* Kernel client MQs */
	struct sp_chunk *kern_mqsp_pool;

	/* Device updates these values when posting messages to a host
	 * target queue */
	u16 req_vq_shared;
	u16 rep_vq_shared;
	u16 aeq_shared;
	u16 irq_claimed;

	/*
	 * Shared host target pages for user-accessible MQs.
	 */
	int hthead;		/* index of first free entry */
	void *htpages;		/* kernel vaddr */
	int htlen;		/* length of htpages memory */
	void *htuva;		/* user mapped vaddr */
	spinlock_t htlock;	/* serialize allocation */

	u64 adapter_hint_uva;	/* access to the activity FIFO */

	/* spinlock_t aeq_lock; */
	/* spinlock_t rnic_lock; */

	__be16 *hint_count;
	dma_addr_t hint_count_dma;
	u16 hints_read;

	int init;		/* TRUE if it's ready */
	char ae_cache_name[16];
	char vq_cache_name[16];
};

struct c2_port {
	u32 msg_enable;
	struct c2_dev *c2dev;
	struct net_device *netdev;

	spinlock_t tx_lock;
	u32 tx_avail;
	struct c2_ring tx_ring;
	struct c2_ring rx_ring;

	void *mem;		/* PCI memory for host rings */
	dma_addr_t dma;
	unsigned long mem_size;

	u32 rx_buf_size;

	struct net_device_stats netstats;
};

/*
 * Activity FIFO registers in BAR0.
 */
#define PCI_BAR0_HOST_HINT	0x100
#define PCI_BAR0_ADAPTER_HINT	0x2000

/*
 * CQ state flags.
 */
#define CQ_ARMED		0x01
#define CQ_WAIT_FOR_DMA		0x80

/*
 * The format of a hint is as follows:
 * Lower 16 bits are the count of hints for the queue.
 * Next 15 bits are the qp_index.
 * Uppermost bit depends on who reads it:
 *    If read by producer, then it means Full (1) or Not-Full (0).
 *    If read by consumer, then it means Empty (1) or Not-Empty (0).
 */
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
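
/*
 * Worked example (values chosen for illustration): C2_HINT_MAKE(5, 3)
 * yields 0x00050003; C2_HINT_GET_INDEX() recovers 5 from bits 16..30
 * and C2_HINT_GET_COUNT() recovers 3 from bits 0..15.  Because the
 * index mask is 0x7FFF0000, the full/empty flag in bit 31 never leaks
 * into the extracted qp_index.
 */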

/*
 * The following defines the offset in SDRAM for the
 * struct c2_adapter_pci_regs.
 */
#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000

#ifndef readq
/*
 * Fallback for platforms without a native 64-bit MMIO read: build the
 * value from two 32-bit reads, low word at addr and high word at
 * addr + 4.  The composite access is not atomic.
 */
static inline u64 readq(const void __iomem *addr)
{
	u64 ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
#endif

#ifndef writeq
/*
 * Guarded by writeq: platforms that define writeq are expected to
 * provide __raw_writeq as well.  Here a 64-bit write is split into
 * two 32-bit writes, which again is not atomic.
 */
static inline void __raw_writeq(u64 val, void __iomem *addr)
{
	__raw_writel((u32) (val), addr);
	__raw_writel((u32) (val >> 32), (addr + 4));
}
#endif
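
/*
 * Usage sketch for the fallbacks above, assuming a 64-bit device
 * register at byte offset 'reg' ('reg' is illustrative, not a real
 * register name):
 *
 *	u64 val = readq(c2dev->regs + reg);
 *	__raw_writeq(val, c2dev->regs + reg);
 *
 * Because each access is split in two, callers must not rely on these
 * being atomic with respect to the adapter.
 */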

#define C2_SET_CUR_RX(c2dev, cur_rx) \
	__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)

#define C2_GET_CUR_RX(c2dev) \
	be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
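
/*
 * Sketch of intended use: the host mirrors its receive cursor into
 * offset 4092 of the TXP ring window (the last 32-bit word of the
 * 4 KB mapping), in big-endian byte order:
 *
 *	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
 *	c2dev->cur_rx = C2_GET_CUR_RX(c2dev);
 *
 * The cpu_to_be32()/be32_to_cpu() pair in the macros handles the
 * byte-order round trip.
 */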
Tom Tucker | f94b533 | 2006-09-22 15:22:48 -0700 | [diff] [blame] | 432 | |
| 433 | static inline struct c2_dev *to_c2dev(struct ib_device *ibdev) |
| 434 | { |
| 435 | return container_of(ibdev, struct c2_dev, ibdev); |
| 436 | } |
| 437 | |
| 438 | static inline int c2_errno(void *reply) |
| 439 | { |
| 440 | switch (c2_wr_get_result(reply)) { |
| 441 | case C2_OK: |
| 442 | return 0; |
| 443 | case CCERR_NO_BUFS: |
| 444 | case CCERR_INSUFFICIENT_RESOURCES: |
| 445 | case CCERR_ZERO_RDMA_READ_RESOURCES: |
| 446 | return -ENOMEM; |
| 447 | case CCERR_MR_IN_USE: |
| 448 | case CCERR_QP_IN_USE: |
| 449 | return -EBUSY; |
| 450 | case CCERR_ADDR_IN_USE: |
| 451 | return -EADDRINUSE; |
| 452 | case CCERR_ADDR_NOT_AVAIL: |
| 453 | return -EADDRNOTAVAIL; |
| 454 | case CCERR_CONN_RESET: |
| 455 | return -ECONNRESET; |
| 456 | case CCERR_NOT_IMPLEMENTED: |
| 457 | case CCERR_INVALID_WQE: |
| 458 | return -ENOSYS; |
| 459 | case CCERR_QP_NOT_PRIVILEGED: |
| 460 | return -EPERM; |
| 461 | case CCERR_STACK_ERROR: |
| 462 | return -EPROTO; |
| 463 | case CCERR_ACCESS_VIOLATION: |
| 464 | case CCERR_BASE_AND_BOUNDS_VIOLATION: |
| 465 | return -EFAULT; |
| 466 | case CCERR_STAG_STATE_NOT_INVALID: |
| 467 | case CCERR_INVALID_ADDRESS: |
| 468 | case CCERR_INVALID_CQ: |
| 469 | case CCERR_INVALID_EP: |
| 470 | case CCERR_INVALID_MODIFIER: |
| 471 | case CCERR_INVALID_MTU: |
| 472 | case CCERR_INVALID_PD_ID: |
| 473 | case CCERR_INVALID_QP: |
| 474 | case CCERR_INVALID_RNIC: |
| 475 | case CCERR_INVALID_STAG: |
| 476 | return -EINVAL; |
| 477 | default: |
| 478 | return -EAGAIN; |
| 479 | } |
| 480 | } |
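
/*
 * Typical use (a sketch of the calling pattern, not verbatim driver
 * code): after pulling a reply off the Verbs Reply MQ, convert the
 * adapter's status into a kernel errno:
 *
 *	err = c2_errno(reply);
 *	if (err)
 *		goto bail;
 */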

/* Device */
extern int c2_register_device(struct c2_dev *c2dev);
extern void c2_unregister_device(struct c2_dev *c2dev);
extern int c2_rnic_init(struct c2_dev *c2dev);
extern void c2_rnic_term(struct c2_dev *c2dev);
extern void c2_rnic_interrupt(struct c2_dev *c2dev);
extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);

/* QPs */
extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
		       struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
			struct ib_qp_attr *attr, int attr_mask);
extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
				 int ord, int ird);
extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
			struct ib_send_wr **bad_wr);
extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
			   struct ib_recv_wr **bad_wr);
extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
extern void c2_set_qp_state(struct c2_qp *, int);
extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);

/* PDs */
extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);

/* CQs */
extern int c2_init_cq(struct c2_dev *c2dev, int entries,
		      struct c2_ucontext *ctx, struct c2_cq *cq);
extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

/* CM */
extern int c2_llp_connect(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *iw_param);
extern int c2_llp_accept(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *iw_param);
extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);

/* MM */
extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
				      int page_size, int pbl_depth, u32 length,
				      u32 off, u64 *va, enum c2_acf acf,
				      struct c2_mr *mr);
extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);

/* AE */
extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);

/* MQSP Allocator */
extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
			     struct sp_chunk **root);
extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
			     dma_addr_t *dma_addr, gfp_t gfp_mask);
extern void c2_free_mqsp(__be16 *mqsp);
#endif