/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

#define HNS_ROCE_HW_VER1                ('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define MAC_ADDR_OCTET_NUM              6
#define HNS_ROCE_MAX_MSG_LEN            0x80000000

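/*
 * The macro below rounds "a" up to the next multiple of "b"; e.g. with the
 * illustrative values a = 100 and b = 64 it evaluates to 128. Because it
 * uses divide/multiply rather than bit masking, it works for non-power-of-two
 * alignments as well.
 */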
#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

#define HNS_ROCE_IB_MIN_SQ_STRIDE       6

#define HNS_ROCE_BA_SIZE                (32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM            0x40
#define HNS_ROCE_MIN_WQE_NUM            0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM     0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM       0x100000
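
/*
 * CQ free-wait budget: with a 20 ms wait per iteration, the count below works
 * out to 5000 / 20 = 250 polls, i.e. roughly a five second total wait
 * (derived purely from the two constants).
 */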
#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS        20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT   \
        (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT     0x2
#define HNS_ROCE_MIN_CQE_CNT            16

#define HNS_ROCE_MAX_IRQ_NUM            34

#define HNS_ROCE_COMP_VEC_NUM           32

#define HNS_ROCE_AEQE_VEC_NUM           1
#define HNS_ROCE_AEQE_OF_VEC_NUM        1

/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT               28
#define HNS_ROCE_TCLASS_SHIFT           20
#define HNS_ROCE_FLOW_LABLE_MASK        0xfffff
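/*
 * The three values above appear to describe how a 32-bit word such as
 * hns_roce_av.sl_tclass_flowlabel is packed: service level in bits 31:28,
 * traffic class starting at bit 20, and the 20-bit flow label in the low
 * bits. This reading is inferred from the shift/mask values, not from a
 * hardware specification.
 */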

#define HNS_ROCE_MAX_PORTS              6
#define HNS_ROCE_MAX_GID_NUM            16
#define HNS_ROCE_GID_SIZE               16

#define BITMAP_NO_RR                    0
#define BITMAP_RR                       1

#define MR_TYPE_MR                      0x00
#define MR_TYPE_DMA                     0x03

#define PKEY_ID                         0xffff
#define GUID_LEN                        8
#define NODE_DESC_SIZE                  64
#define DB_REG_OFFSET                   0x1000

#define SERV_TYPE_RC                    0
#define SERV_TYPE_RD                    1
#define SERV_TYPE_UC                    2
#define SERV_TYPE_UD                    3

#define PAGES_SHIFT_8                   8
#define PAGES_SHIFT_16                  16
#define PAGES_SHIFT_24                  24
#define PAGES_SHIFT_32                  32

enum hns_roce_qp_state {
        HNS_ROCE_QP_STATE_RST,
        HNS_ROCE_QP_STATE_INIT,
        HNS_ROCE_QP_STATE_RTR,
        HNS_ROCE_QP_STATE_RTS,
        HNS_ROCE_QP_STATE_SQD,
        HNS_ROCE_QP_STATE_ERR,
        HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
        HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
        HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
        HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
        HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
        HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
        HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
        HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
        HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
        HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
        HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
        HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
        HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
        HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
        HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
        /* 0x10 and 0x11 are unused in the current application case */
        HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
        HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
        HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW              = 0x14,
};

/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
enum {
        HNS_ROCE_LWQCE_QPC_ERROR                = 1,
        HNS_ROCE_LWQCE_MTU_ERROR                = 2,
        HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR        = 3,
        HNS_ROCE_LWQCE_WQE_ADDR_ERROR           = 4,
        HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR       = 5,
        HNS_ROCE_LWQCE_SL_ERROR                 = 6,
        HNS_ROCE_LWQCE_PORT_ERROR               = 7,
};

/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
enum {
        HNS_ROCE_LAVWQE_R_KEY_VIOLATION         = 1,
        HNS_ROCE_LAVWQE_LENGTH_ERROR            = 2,
        HNS_ROCE_LAVWQE_VA_ERROR                = 3,
        HNS_ROCE_LAVWQE_PD_ERROR                = 4,
        HNS_ROCE_LAVWQE_RW_ACC_ERROR            = 5,
        HNS_ROCE_LAVWQE_KEY_STATE_ERROR         = 6,
        HNS_ROCE_LAVWQE_MR_OPERATION_ERROR      = 7,
};

/* DOORBELL overflow subtype */
enum {
        HNS_ROCE_DB_SUBTYPE_SDB_OVF             = 1,
        HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF         = 2,
        HNS_ROCE_DB_SUBTYPE_ODB_OVF             = 3,
        HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF         = 4,
        HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP         = 5,
        HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP         = 6,
};

enum {
        /* RQ & SRQ related operations */
        HNS_ROCE_OPCODE_SEND_DATA_RECEIVE       = 0x06,
        HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE   = 0x07,
};

#define HNS_ROCE_CMD_SUCCESS                    1

#define HNS_ROCE_PORT_DOWN                      0
#define HNS_ROCE_PORT_UP                        1

#define HNS_ROCE_MTT_ENTRY_PER_SEG              8

#define PAGE_ADDR_SHIFT                         12
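/*
 * 1 << PAGE_ADDR_SHIFT = 4096; addresses in the hardware tables are
 * apparently recorded at a 4 KB page granularity (an inference from the
 * constant's value, not from a hardware manual).
 */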

struct hns_roce_uar {
        u64                     pfn;
        unsigned long           index;
};

struct hns_roce_ucontext {
        struct ib_ucontext      ibucontext;
        struct hns_roce_uar     uar;
};

struct hns_roce_pd {
        struct ib_pd            ibpd;
        unsigned long           pdn;
};

struct hns_roce_bitmap {
        /* Last bit found to be 1 during the bitmap traversal */
        unsigned long           last;
        unsigned long           top;
        unsigned long           max;
        unsigned long           reserved_top;
        unsigned long           mask;
        spinlock_t              lock;
        unsigned long           *table;
};

/* Bit count of an order-level bitmap is computed as: 1 << (max_order - order) */
/* Order 0 is the largest bitmap; order == max_order is the smallest (a single bit) */
/* Each bit represents the free/used status of one buddy partner in the bitmap */
/*
 * Initially all bits of every bitmap are 0, except for one bit at max_order
 * which is 1.
 * Bit = 1: idle and available; bit = 0: not available.
 */
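/*
 * As a worked example (values are illustrative, not taken from the hardware):
 * with max_order = 2 the allocator keeps three bitmaps -- order 0 with
 * 1 << 2 = 4 bits, order 1 with 2 bits and order 2 with a single bit -- and
 * only that single order-2 bit starts out set.
 */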
struct hns_roce_buddy {
        /* One pointer per order-level bitmap */
        unsigned long **bits;
        /* Number of available bits in each order-level bitmap */
        u32            *num_free;
        int             max_order;
        spinlock_t      lock;
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
        /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
        u32             type;
        /* Number of elements in the HEM array */
        unsigned long   num_hem;
        /* Total number of objects recorded by the HEM entries */
        unsigned long   num_obj;
        /* Size of a single object */
        unsigned long   obj_size;
        int             lowmem;
        struct mutex    mutex;
        struct hns_roce_hem **hem;
};

struct hns_roce_mtt {
        unsigned long   first_seg;
        int             order;
        int             page_shift;
};

/* Only a 4K page size is supported for MR registration */
#define MR_SIZE_4K 0

struct hns_roce_mr {
        struct ib_mr            ibmr;
        struct ib_umem          *umem;
        u64                     iova; /* MR's original virtual addr */
        u64                     size; /* Address range of MR */
        u32                     key; /* Key of MR */
        u32                     pd;   /* PD num of MR */
        u32                     access; /* Access permission of MR */
        int                     enabled; /* MR's active status */
        int                     type; /* MR's register type */
        u64                     *pbl_buf; /* MR's PBL space */
        dma_addr_t              pbl_dma_addr; /* MR's PBL space PA */
};

struct hns_roce_mr_table {
        struct hns_roce_bitmap          mtpt_bitmap;
        struct hns_roce_buddy           mtt_buddy;
        struct hns_roce_hem_table       mtt_table;
        struct hns_roce_hem_table       mtpt_table;
};

struct hns_roce_wq {
        u64             *wrid;     /* Work request ID */
        spinlock_t      lock;
        int             wqe_cnt;   /* WQE num */
        u32             max_post;
        int             max_gs;
        int             offset;
        int             wqe_shift; /* WQE size */
        u32             head;
        u32             tail;
        void __iomem    *db_reg_l;
};

struct hns_roce_buf_list {
        void            *buf;
        dma_addr_t      map;
};

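/*
 * Descriptor for a driver-allocated buffer: "direct" describes a single
 * contiguous mapping, while "page_list" tracks per-page chunks for the
 * non-contiguous case; hns_roce_buf_offset() below picks between the two
 * when translating an offset (description inferred from the accessor, not
 * from hardware documentation).
 */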
struct hns_roce_buf {
        struct hns_roce_buf_list        direct;
        struct hns_roce_buf_list        *page_list;
        int                             nbufs;
        u32                             npages;
        int                             page_shift;
};

struct hns_roce_cq_buf {
        struct hns_roce_buf hr_buf;
        struct hns_roce_mtt hr_mtt;
};

struct hns_roce_cq {
        struct ib_cq                    ib_cq;
        struct hns_roce_cq_buf          hr_buf;
        spinlock_t                      lock;
        struct ib_umem                  *umem;
        void (*comp)(struct hns_roce_cq *);
        void (*event)(struct hns_roce_cq *, enum hns_roce_event);

        struct hns_roce_uar             *uar;
        u32                             cq_depth;
        u32                             cons_index;
        void __iomem                    *cq_db_l;
        u16                             *tptr_addr;
        unsigned long                   cqn;
        u32                             vector;
        atomic_t                        refcount;
        struct completion               free;
};

struct hns_roce_srq {
        struct ib_srq           ibsrq;
        int                     srqn;
};

struct hns_roce_uar_table {
        struct hns_roce_bitmap bitmap;
};

struct hns_roce_qp_table {
        struct hns_roce_bitmap          bitmap;
        spinlock_t                      lock;
        struct hns_roce_hem_table       qp_table;
        struct hns_roce_hem_table       irrl_table;
};

struct hns_roce_cq_table {
        struct hns_roce_bitmap          bitmap;
        spinlock_t                      lock;
        struct radix_tree_root          tree;
        struct hns_roce_hem_table       table;
};

struct hns_roce_raq_table {
        struct hns_roce_buf_list        *e_raq_buf;
};

struct hns_roce_av {
        __le32          port_pd;
        u8              gid_index;
        u8              stat_rate;
        u8              hop_limit;
        __le32          sl_tclass_flowlabel;
        u8              dgid[HNS_ROCE_GID_SIZE];
        u8              mac[6];
        __le16          vlan;
};

struct hns_roce_ah {
        struct ib_ah            ibah;
        struct hns_roce_av      av;
};

struct hns_roce_cmd_context {
        struct completion       done;
        int                     result;
        int                     next;
        u64                     out_param;
        u16                     token;
};

struct hns_roce_cmdq {
        struct dma_pool         *pool;
        u8 __iomem              *hcr;
        struct mutex            hcr_mutex;
        struct semaphore        poll_sem;
        /*
         * Event mode: protects the cmd register with mutual exclusion and
         * ensures that max_cmds and the user-usable limit region are not
         * exceeded.
         */
        struct semaphore        event_sem;
        int                     max_cmds;
        spinlock_t              context_lock;
        int                     free_head;
        struct hns_roce_cmd_context *context;
        /*
         * Mask obtained from the integer part of max_cmds, which is
         * computed according to a power of 2.
         */
        u16                     token_mask;
        /*
         * Whether the process uses event mode (non-zero when enabled).
         * Once the cmd event queue is ready, the driver can switch into
         * event mode; when the device is closed it switches back to poll
         * mode (non-event mode).
         */
        u8                      use_events;
        u8                      toggle;
};

struct hns_roce_cmd_mailbox {
        void                   *buf;
        dma_addr_t              dma;
};

struct hns_roce_dev;

struct hns_roce_qp {
        struct ib_qp            ibqp;
        struct hns_roce_buf     hr_buf;
        struct hns_roce_wq      rq;
        __le64                  doorbell_qpn;
        __le32                  sq_signal_bits;
        u32                     sq_next_wqe;
        int                     sq_max_wqes_per_wr;
        int                     sq_spare_wqes;
        struct hns_roce_wq      sq;

        struct ib_umem          *umem;
        struct hns_roce_mtt     mtt;
        u32                     buff_size;
        struct mutex            mutex;
        u8                      port;
        u8                      phy_port;
        u8                      sl;
        u8                      resp_depth;
        u8                      state;
        u32                     access_flags;
        u32                     pkey_index;
        void                    (*event)(struct hns_roce_qp *,
                                         enum hns_roce_event);
        unsigned long           qpn;

        atomic_t                refcount;
        struct completion       free;
};

struct hns_roce_sqp {
        struct hns_roce_qp      hr_qp;
};

struct hns_roce_ib_iboe {
        spinlock_t              lock;
        struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
        struct notifier_block   nb;
        struct notifier_block   nb_inet;
        u8                      phy_port[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_eq {
        struct hns_roce_dev             *hr_dev;
        void __iomem                    *doorbell;

        int                             type_flag; /* AEQ: 1, CEQ: 0 */
        int                             eqn;
        u32                             entries;
        int                             log_entries;
        int                             eqe_size;
        int                             irq;
        int                             log_page_size;
        int                             cons_index;
        struct hns_roce_buf_list        *buf_list;
};

struct hns_roce_eq_table {
        struct hns_roce_eq      *eq;
        void __iomem            **eqc_base;
};

struct hns_roce_caps {
        u8              num_ports;
        int             gid_table_len[HNS_ROCE_MAX_PORTS];
        int             pkey_table_len[HNS_ROCE_MAX_PORTS];
        int             local_ca_ack_delay;
        int             num_uars;
        u32             phy_num_uars;
        u32             max_sq_sg;      /* 2 */
        u32             max_sq_inline;  /* 32 */
        u32             max_rq_sg;      /* 2 */
        int             num_qps;        /* 256k */
        u32             max_wqes;       /* 16k */
        u32             max_sq_desc_sz; /* 64 */
        u32             max_rq_desc_sz; /* 64 */
        int             max_qp_init_rdma;
        int             max_qp_dest_rdma;
        int             num_cqs;
        int             max_cqes;
        int             reserved_cqs;
        int             num_aeq_vectors;        /* 1 */
        int             num_comp_vectors;       /* 32 ceq */
        int             num_other_vectors;
        int             num_mtpts;
        u32             num_mtt_segs;
        int             reserved_mrws;
        int             reserved_uars;
        int             num_pds;
        int             reserved_pds;
        u32             mtt_entry_sz;
        u32             cq_entry_sz;
        u32             page_size_cap;
        u32             reserved_lkey;
        int             mtpt_entry_sz;
        int             qpc_entry_sz;
        int             irrl_entry_sz;
        int             cqc_entry_sz;
        int             aeqe_depth;
        int             ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
        enum ib_mtu     max_mtu;
};

struct hns_roce_hw {
        int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
        void (*hw_profile)(struct hns_roce_dev *hr_dev);
        int (*hw_init)(struct hns_roce_dev *hr_dev);
        void (*hw_exit)(struct hns_roce_dev *hr_dev);
        void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
                        union ib_gid *gid);
        void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
        void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                        enum ib_mtu mtu);
        int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
                          unsigned long mtpt_idx);
        void (*write_cqc)(struct hns_roce_dev *hr_dev,
                          struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
                          dma_addr_t dma_handle, int nent, u32 vector);
        int (*clear_hem)(struct hns_roce_dev *hr_dev,
                         struct hns_roce_hem_table *table, int obj);
        int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                        int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
        int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                         int attr_mask, enum ib_qp_state cur_state,
                         enum ib_qp_state new_state);
        int (*destroy_qp)(struct ib_qp *ibqp);
        int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         struct ib_send_wr **bad_wr);
        int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
        int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
        int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
        int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
        int (*destroy_cq)(struct ib_cq *ibcq);
        void    *priv;
};

struct hns_roce_dev {
        struct ib_device        ib_dev;
        struct platform_device  *pdev;
        struct hns_roce_uar     priv_uar;
        const char              *irq_names[HNS_ROCE_MAX_IRQ_NUM];
        spinlock_t              sm_lock;
        spinlock_t              bt_cmd_lock;
        struct hns_roce_ib_iboe iboe;

        int                     irq[HNS_ROCE_MAX_IRQ_NUM];
        u8 __iomem              *reg_base;
        struct hns_roce_caps    caps;
        struct radix_tree_root  qp_table_tree;

        unsigned char           dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
        u64                     sys_image_guid;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_rev;
        void __iomem            *priv_addr;

        struct hns_roce_cmdq    cmd;
        struct hns_roce_bitmap    pd_bitmap;
        struct hns_roce_uar_table uar_table;
        struct hns_roce_mr_table  mr_table;
        struct hns_roce_cq_table  cq_table;
        struct hns_roce_qp_table  qp_table;
        struct hns_roce_eq_table  eq_table;

        int                     cmd_mod;
        int                     loop_idc;
        dma_addr_t              tptr_dma_addr;  /* only for hw v1 */
        u32                     tptr_size;      /* only for hw v1 */
        struct hns_roce_hw      *hw;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
        return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
                        *to_hr_ucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
        return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
        return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}

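/*
 * Post the two 32-bit words in val[] to the device as a single 64-bit
 * register write; the __raw_ accessor performs no byte swapping, so the
 * caller supplies the value already in device byte order.
 */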
static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
{
        __raw_writeq(*(u64 *) val, dest);
}

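/*
 * Look up a QP by number in the per-device radix tree. The key is
 * qpn & (caps.num_qps - 1), which only behaves like a modulo when
 * caps.num_qps is a power of two.
 */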
static inline struct hns_roce_qp
        *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
        return radix_tree_lookup(&hr_dev->qp_table_tree,
                                 qpn & (hr_dev->caps.num_qps - 1));
}

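/*
 * Translate a byte offset within a hns_roce_buf into a kernel virtual
 * address: contiguous buffers (64-bit builds or a single chunk) are
 * addressed directly from "direct", otherwise the offset is split into a
 * page index and an in-page remainder against page_list.
 */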
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
        u32 bits_per_long_val = BITS_PER_LONG;

        if (bits_per_long_val == 64 || buf->nbufs == 1)
                return (char *)(buf->direct.buf) + offset;
        else
                return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
                       (offset & (PAGE_SIZE - 1));
}

int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
                        u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
                      struct hns_roce_mtt *mtt);
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
                          struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
                           struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
                          int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
                         u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
                                int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
                                unsigned long obj, int cnt,
                                int rr);

struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);

struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
                                struct ib_ucontext *context,
                                struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                   u64 virt_addr, int access_flags,
                                   struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
                       struct hns_roce_cmd_mailbox *mailbox,
                       unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
                       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                       struct hns_roce_buf *buf);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mtt *mtt, struct ib_umem *umem);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
                       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt);
__be32 send_ieth(struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
                                    const struct ib_cq_init_attr *attr,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);

extern struct hns_roce_hw hns_roce_hw_v1;

#endif /* _HNS_ROCE_DEVICE_H */