blob: 1050829da64227ce53f4bc575795d29d82a35d1c [file] [log] [blame]
oulijun9a443532016-07-21 19:06:38 +08001/*
2 * Copyright (c) 2016 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

/* "hi06" packed big-endian into a u32: identifies the v1 (Hip06) engine */
#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define MAC_ADDR_OCTET_NUM			6
#define HNS_ROCE_MAX_MSG_LEN			0x80000000

/* Round (a) up to the nearest multiple of (b) */
#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define HNS_ROCE_BA_SIZE			(32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_WQE_NUM			0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000

#define HNS_ROCE_MAX_IRQ_NUM			34

#define HNS_ROCE_COMP_VEC_NUM			32

#define HNS_ROCE_AEQE_VEC_NUM			1
#define HNS_ROCE_AEQE_OF_VEC_NUM		1

/*
 * Bit layout of the 32-bit sl/tclass/flowlabel word (see
 * hns_roce_av.sl_tclass_flowlabel): flow label occupies the low 20 bits
 * (0xfffff), traffic class starts at bit 20, service level at bit 28.
 * NOTE: "LABLE" misspelling is kept — the macro name is part of the API.
 */
#define HNS_ROCE_SL_SHIFT			28
#define HNS_ROCE_TCLASS_SHIFT			20
#define HNS_ROCE_FLOW_LABLE_MASK		0xfffff

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_MAX_GID_NUM			16
#define HNS_ROCE_GID_SIZE			16

/* "rr" argument of the bitmap free routines: round-robin reuse or not */
#define BITMAP_NO_RR				0
#define BITMAP_RR				1

/* Values for hns_roce_mr.type */
#define MR_TYPE_MR				0x00
#define MR_TYPE_DMA				0x03

#define PKEY_ID					0xffff
#define GUID_LEN				8
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

/* Transport service types */
#define SERV_TYPE_RC				0
#define SERV_TYPE_RD				1
#define SERV_TYPE_UC				2
#define SERV_TYPE_UD				3

#define PAGES_SHIFT_8				8
#define PAGES_SHIFT_16				16
#define PAGES_SHIFT_24				24
#define PAGES_SHIFT_32				32
/* Driver-internal QP states; HNS_ROCE_QP_NUM_STATE is a count sentinel */
enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,	/* not a real state */
};
105
/* Asynchronous event types reported through the AEQ */
enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG		= 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED	= 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST		= 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED		= 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR	= 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH	= 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH	= 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR	= 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR	= 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW		= 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID	= 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE		= 0x0f,
	/* 0x10 and 0x11 are currently unused event codes */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW		= 0x12,
	HNS_ROCE_EVENT_TYPE_MB			= 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW	= 0x14,
};
126
/* Local Work Queue Catastrophic Error, subtype of event 0x05 */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR		= 1,
	HNS_ROCE_LWQCE_MTU_ERROR		= 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR	= 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR		= 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR	= 5,
	HNS_ROCE_LWQCE_SL_ERROR			= 6,
	HNS_ROCE_LWQCE_PORT_ERROR		= 7,
};

/* Local Access Violation Work Queue Error, subtype of event 0x07 */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION		= 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR		= 2,
	HNS_ROCE_LAVWQE_VA_ERROR		= 3,
	HNS_ROCE_LAVWQE_PD_ERROR		= 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR		= 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR		= 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR	= 7,
};

/* Doorbell overflow subtypes (SDB = send, ODB = other doorbell) */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF		= 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF		= 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF		= 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF		= 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP		= 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP		= 6,
};

enum {
	/* RQ&SRQ related operations */
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE	= 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};

/* Status value hardware reports for a successfully executed command */
#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_PORT_DOWN			0
#define HNS_ROCE_PORT_UP			1

/* Number of MTT entries grouped into one allocatable segment */
#define HNS_ROCE_MTT_ENTRY_PER_SEG		8

#define PAGE_ADDR_SHIFT				12
173
/* User Access Region: doorbell page assigned to one context */
struct hns_roce_uar {
	u64		pfn;	/* page frame number of the doorbell page */
	unsigned long	index;	/* index allocated from the UAR bitmap */
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;	/* per-context doorbell page */
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;	/* protection domain number */
};

/* Index allocator over a flat bitmap (PDs, CQNs, QPNs, MTPTs, UARs) */
struct hns_roce_bitmap {
	/* Position of the last bit handed out; search resumes after it */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;	/* total number of allocatable bits */
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;	/* protects all allocator state */
	unsigned long		*table;	/* the bitmap itself */
};
199
/*
 * Buddy allocator (used for MTT segment allocation, see
 * hns_roce_mr_table.mtt_buddy).
 *
 * bits[order] is a bitmap with 1 << (max_order - order) bits, one per
 * free block of 1 << order units: order 0 has the largest bitmap,
 * order == max_order a single bit.  Initially only the max_order bit
 * is set.  Bit = 1 means the block is free; bit = 0 means unavailable.
 */
struct hns_roce_buddy {
	/* One bitmap per order level */
	unsigned long **bits;
	/* Number of free blocks at each order level */
	u32            *num_free;
	int             max_order;
	spinlock_t      lock;	/* protects bits and num_free */
};
215
/* For Hardware Entry Memory: host memory backing hardware context tables */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* Number of elements in the hem array below */
	unsigned long	num_hem;
	/* Total number of objects the table holds */
	unsigned long	num_obj;
	/* Size of a single object */
	unsigned long	obj_size;
	/* Non-zero: allocate backing pages from lowmem — TODO confirm */
	int		lowmem;
	struct mutex	mutex;		/* serializes table population */
	struct hns_roce_hem **hem;	/* per-chunk backing memory */
};

/* A range of MTT (memory translation table) entries */
struct hns_roce_mtt {
	unsigned long	first_seg;	/* first segment of the range */
	int		order;		/* buddy order it was allocated with */
	int		page_shift;	/* log2 of the mapped page size */
};
236
/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0

struct hns_roce_mr {
	struct ib_mr		ibmr;
	struct ib_umem		*umem;		/* user memory, NULL for DMA MR */
	u64			iova;		/* MR's original virtual addr */
	u64			size;		/* address range of MR */
	u32			key;		/* key of MR */
	u32			pd;		/* PD number of MR */
	u32			access;		/* access permission of MR */
	int			enabled;	/* MR's active status */
	int			type;		/* register type (MR_TYPE_*) */
	u64			*pbl_buf;	/* MR's PBL space */
	dma_addr_t		pbl_dma_addr;	/* MR's PBL space PA */
};

struct hns_roce_mr_table {
	struct hns_roce_bitmap		mtpt_bitmap;	/* MTPT index allocator */
	struct hns_roce_buddy		mtt_buddy;	/* MTT segment allocator */
	struct hns_roce_hem_table	mtt_table;	/* MTT entry HEM */
	struct hns_roce_hem_table	mtpt_table;	/* MTPT entry HEM */
};
260
/* One work queue (SQ or RQ) of a QP */
struct hns_roce_wq {
	u64		*wrid;		/* work request ID per WQE slot */
	spinlock_t	lock;		/* serializes posting */
	int		wqe_cnt;	/* number of WQEs */
	u32		max_post;
	int		max_gs;		/* max scatter/gather entries */
	int		offset;		/* byte offset inside the QP buffer */
	int		wqe_shift;	/* log2 of WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg_l;	/* doorbell register */
};

/* One contiguous DMA-coherent allocation */
struct hns_roce_buf_list {
	void		*buf;	/* CPU address */
	dma_addr_t	map;	/* device address */
};

/*
 * Queue backing store: either one direct mapping, or a list of
 * page-sized chunks addressed via hns_roce_buf_offset().
 */
struct hns_roce_buf {
	struct hns_roce_buf_list	direct;
	struct hns_roce_buf_list	*page_list;
	int				nbufs;
	u32				npages;
	int				page_shift;
};

struct hns_roce_cq_buf {
	struct hns_roce_buf	hr_buf;
	struct hns_roce_mtt	hr_mtt;
};
291
struct hns_roce_cq {
	struct ib_cq		ib_cq;
	struct hns_roce_cq_buf	hr_buf;		/* kernel-space CQE buffer */
	spinlock_t		lock;
	struct ib_umem		*umem;		/* user-space CQE buffer, if any */
	void (*comp)(struct hns_roce_cq *);	/* completion notification hook */
	void (*event)(struct hns_roce_cq *, enum hns_roce_event);

	struct hns_roce_uar	*uar;
	u32			cq_depth;
	u32			cons_index;	/* consumer index */
	void __iomem		*cq_db_l;	/* CQ doorbell register */
	u16			*tptr_addr;	/* tptr entry; hw v1 only */
	unsigned long		cqn;		/* CQ number */
	u32			vector;		/* completion vector */
	atomic_t		refcount;
	struct completion	free;	/* completed when last ref is dropped */
};

/* Minimal SRQ object (software bookkeeping only) */
struct hns_roce_srq {
	struct ib_srq		ibsrq;
	int			srqn;
};
315
struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;	/* UAR index allocator */
};

struct hns_roce_qp_table {
	struct hns_roce_bitmap		bitmap;		/* QPN allocator */
	spinlock_t			lock;
	struct hns_roce_hem_table	qp_table;	/* QP context HEM */
	struct hns_roce_hem_table	irrl_table;	/* IRRL HEM */
};

struct hns_roce_cq_table {
	struct hns_roce_bitmap		bitmap;	/* CQN allocator */
	spinlock_t			lock;	/* protects tree */
	struct radix_tree_root		tree;	/* cqn -> hns_roce_cq lookup */
	struct hns_roce_hem_table	table;	/* CQ context HEM */
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list	*e_raq_buf;
};
337
/* Address vector stored inside a hns_roce_ah */
struct hns_roce_av {
	__le32      port_pd;	/* port number and PD packed in one word */
	u8          gid_index;
	u8          stat_rate;
	u8          hop_limit;
	/* Packed per HNS_ROCE_SL_SHIFT/TCLASS_SHIFT/FLOW_LABLE_MASK */
	__le32      sl_tclass_flowlabel;
	u8          dgid[HNS_ROCE_GID_SIZE];
	u8          mac[6];	/* destination MAC */
	__le16      vlan;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

/* Per-outstanding-command state for event (interrupt) mode */
struct hns_roce_cmd_context {
	struct completion	done;	/* completed by hns_roce_cmd_event() */
	int			result;
	int			next;	/* free-list link */
	u64			out_param;
	u16			token;	/* matches a command to its completion */
};
361
/* Command interface through which mailbox commands reach the hardware */
struct hns_roce_cmdq {
	struct dma_pool		*pool;		/* pool for mailbox buffers */
	u8 __iomem		*hcr;		/* command register block */
	struct mutex		hcr_mutex;	/* serializes HCR access */
	struct semaphore	poll_sem;	/* exclusion in polling mode */
	/*
	 * Event mode: limits the number of concurrent commands so we
	 * never exceed max_cmds or the user-reserved region.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;	/* protects free_head list */
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Mask applied when generating command tokens; derived from
	 * max_cmds rounded to a power of two.
	 */
	u16			token_mask;
	/*
	 * Whether the command interface currently uses event mode.
	 * Starts in poll mode; switched to event mode once the cmd
	 * event queue is ready, and back to poll mode on device close.
	 */
	u8			use_events;
	u8			toggle;
};

/* DMA-able buffer handed to the hardware mailbox */
struct hns_roce_cmd_mailbox {
	void		       *buf;	/* CPU address */
	dma_addr_t		dma;	/* device address */
};
395
struct hns_roce_dev;

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_buf	hr_buf;		/* kernel-space WQE buffer */
	struct hns_roce_wq	rq;
	__le64			doorbell_qpn;
	__le32			sq_signal_bits;
	u32			sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct hns_roce_wq	sq;

	struct ib_umem		*umem;		/* user-space buffer, if any */
	struct hns_roce_mtt	mtt;
	u32			buff_size;
	struct mutex		mutex;		/* serializes QP modification */
	u8			port;		/* logical IB port */
	u8			phy_port;	/* physical port backing it */
	u8			sl;		/* service level */
	u8			resp_depth;
	u8			state;
	u32			access_flags;
	u32			pkey_index;
	/* Async event callback for this QP */
	void			(*event)(struct hns_roce_qp *,
					 enum hns_roce_event);
	unsigned long		qpn;		/* QP number */

	atomic_t		refcount;
	struct completion	free;	/* completed when last ref is dropped */
};

/* Special QP wrapper */
struct hns_roce_sqp {
	struct hns_roce_qp	hr_qp;
};

/* RoCE <-> Ethernet (netdev) glue */
struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;		/* netdev event notifier */
	struct notifier_block	nb_inet;	/* inet address notifier */
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};
439
/* Event queue: AEQ for async events, CEQ for completion events */
struct hns_roce_eq {
	struct hns_roce_dev	       *hr_dev;
	void __iomem		       *doorbell;

	int				type_flag; /* Aeq:1 ceq:0 */
	int				eqn;
	u32				entries;
	int				log_entries;
	int				eqe_size;
	int				irq;
	int				log_page_size;
	int				cons_index;	/* consumer index */
	struct hns_roce_buf_list       *buf_list;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;		/* array of all EQs */
	void __iomem	       **eqc_base;	/* per-EQ context registers */
};

/* Device capabilities and limits reported by the hw layer */
struct hns_roce_caps {
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;	/* 2 */
	u32		max_sq_inline;	/* 32 */
	u32		max_rq_sg;	/* 2 */
	int		num_qps;	/* 256k */
	u32		max_wqes;	/* 16k */
	u32		max_sq_desc_sz;	/* 64 */
	u32		max_rq_desc_sz;	/* 64 */
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	int		num_cqs;
	int		max_cqes;
	int		reserved_cqs;
	int		num_aeq_vectors;	/* 1 */
	int		num_comp_vectors;	/* 32 ceq */
	int		num_other_vectors;
	int		num_mtpts;
	u32		num_mtt_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		mtt_entry_sz;
	u32		cq_entry_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_entry_sz;
	int		irrl_entry_sz;
	int		cqc_entry_sz;
	int		aeqe_depth;
	int		ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
	enum ib_mtu	max_mtu;
};
500
/* Hardware abstraction: per-engine implementation of the driver ops */
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	void (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
			union ib_gid *gid);
	void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	/* Fill a hardware MTPT entry into the mailbox buffer */
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	/* Fill a hardware CQ context into the mailbox buffer */
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle, int nent, u32 vector);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp);
	int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
	void	*priv;	/* engine-private data */
};
533
/* Per-device state; one instance per RoCE engine */
struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device  *pdev;
	struct hns_roce_uar     priv_uar;	/* kernel's own UAR */
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		bt_cmd_lock;
	struct hns_roce_ib_iboe iboe;

	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;	/* mapped register space */
	struct hns_roce_caps	caps;
	struct radix_tree_root  qp_table_tree;	/* qpn -> hns_roce_qp lookup */

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
	u64			sys_image_guid;
	u32                     vendor_id;
	u32                     vendor_part_id;
	u32                     hw_rev;
	void __iomem            *priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap    pd_bitmap;	/* PD number allocator */
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;

	int			cmd_mod;	/* command event mode flag */
	int			loop_idc;
	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
	u32			tptr_size;	/* only for hw v1 */
	struct hns_roce_hw	*hw;		/* engine-specific ops */
};
569
/* container_of() conversions from IB core objects to driver objects */

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
	return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}
615
/*
 * Write a 64-bit doorbell value (two 32-bit big-endian words) in one
 * bus transaction.  NOTE(review): the *(u64 *) cast assumes val[] is
 * 8-byte aligned and that __raw_writeq is available — verify for
 * 32-bit platforms.
 */
static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
{
	__raw_writeq(*(u64 *) val, dest);
}
620
/*
 * Look up a QP by number in the device's radix tree.  The QPN is
 * masked with (num_qps - 1) because the tree is indexed by the low
 * bits only.  Returns NULL when no QP matches.
 */
static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return radix_tree_lookup(&hr_dev->qp_table_tree,
				 qpn & (hr_dev->caps.num_qps - 1));
}
627
628static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
629{
630 u32 bits_per_long_val = BITS_PER_LONG;
631
632 if (bits_per_long_val == 64 || buf->nbufs == 1)
633 return (char *)(buf->direct.buf) + offset;
634 else
635 return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
636 (offset & (PAGE_SIZE - 1));
637}
638
639int hns_roce_init_uar_table(struct hns_roce_dev *dev);
640int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
641void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
642void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
643
644int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
645void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
646void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
647 u64 out_param);
648int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
649void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
650
651int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
652 struct hns_roce_mtt *mtt);
653void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
654 struct hns_roce_mtt *mtt);
655int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
656 struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
657
658int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
659int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
660int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
661int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
662int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
663
664void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
665void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
666void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
667void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
668void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
669
670int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
Wei Hu (Xavier)5e6ff782016-11-23 19:41:07 +0000671void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
672 int rr);
oulijun9a443532016-07-21 19:06:38 +0800673int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
674 u32 reserved_bot, u32 resetrved_top);
675void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
676void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
677int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
678 int align, unsigned long *obj);
679void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
Wei Hu (Xavier)5e6ff782016-11-23 19:41:07 +0000680 unsigned long obj, int cnt,
681 int rr);
oulijun9a443532016-07-21 19:06:38 +0800682
683struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
684int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
685int hns_roce_destroy_ah(struct ib_ah *ah);
686
687struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
688 struct ib_ucontext *context,
689 struct ib_udata *udata);
690int hns_roce_dealloc_pd(struct ib_pd *pd);
691
692struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
693struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
694 u64 virt_addr, int access_flags,
695 struct ib_udata *udata);
696int hns_roce_dereg_mr(struct ib_mr *ibmr);
Shaobo Xubfcc6812016-11-29 23:10:26 +0000697int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
698 struct hns_roce_cmd_mailbox *mailbox,
699 unsigned long mpt_index);
700unsigned long key_to_hw_index(u32 key);
oulijun9a443532016-07-21 19:06:38 +0800701
702void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
703 struct hns_roce_buf *buf);
704int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
705 struct hns_roce_buf *buf);
706
707int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
708 struct hns_roce_mtt *mtt, struct ib_umem *umem);
709
710struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
711 struct ib_qp_init_attr *init_attr,
712 struct ib_udata *udata);
713int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
714 int attr_mask, struct ib_udata *udata);
715void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
716void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
717bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
718 struct ib_cq *ib_cq);
719enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
720void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
721 struct hns_roce_cq *recv_cq);
722void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
723 struct hns_roce_cq *recv_cq);
724void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
725void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
726void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
727 int cnt);
728__be32 send_ieth(struct ib_send_wr *wr);
729int to_hr_qp_type(int qp_type);
730
731struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
732 const struct ib_cq_init_attr *attr,
733 struct ib_ucontext *context,
734 struct ib_udata *udata);
735
736int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
737
738void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
739void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
740void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
741int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
742
743extern struct hns_roce_hw hns_roce_hw_v1;
744
745#endif /* _HNS_ROCE_DEVICE_H */