/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

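/*
 * Example use of the logging macros above (a sketch; the call and
 * message are illustrative, but "dev" must be a struct mlx5_ib_dev *):
 *
 *	err = mlx5_core_create_qp(dev->mdev, ...);
 *	if (err)
 *		mlx5_ib_warn(dev, "create qp failed, err %d\n", err);
 *
 * Each variant prefixes the message with the device name, function,
 * line number and current pid.
 */
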
#define field_avail(type, fld, sz) (offsetof(type, fld) +	\
				    sizeof(((type *)0)->fld) <= (sz))

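/*
 * field_avail() reports whether a buffer of "sz" bytes is large enough
 * to contain member "fld" of "type", which is useful when deciding
 * whether a user-supplied response buffer can hold a newer, optional
 * field. A sketch (the response type and field are illustrative):
 *
 *	if (field_avail(struct mlx5_ib_alloc_ucontext_resp, cqe_version,
 *			udata->outlen))
 *		resp.cqe_version = ...;
 */
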
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};

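/*
 * A sketch of how an mmap offset encoded by user space might be decoded
 * with the constants above; the helper name is illustrative, and it
 * assumes the command is stored MLX5_IB_MMAP_CMD_SHIFT bits above the
 * command-specific argument:
 *
 *	static inline int get_command(unsigned long offset)
 *	{
 *		return (offset >> MLX5_IB_MMAP_CMD_SHIFT) &
 *		       MLX5_IB_MMAP_CMD_MASK;
 *	}
 */
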
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}

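/*
 * The conversion above relies on MLX5_PFAULT_REQUESTOR and
 * MLX5_PFAULT_WRITE being the two low-order flag bits, so that the
 * masked flags equal the matching context value (a sketch, assuming
 * REQUESTOR is bit 0 and WRITE is bit 1):
 *
 *	0                                         -> RESPONDER_READ  (0)
 *	MLX5_PFAULT_REQUESTOR                     -> REQUESTOR_READ  (1)
 *	MLX5_PFAULT_WRITE                         -> RESPONDER_WRITE (2)
 *	MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE -> REQUESTOR_WRITE (3)
 */
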
struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * QPs we get it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
};

struct mlx5_umr_wr {
	struct ib_send_wr	wr;
	union {
		u64		virt_addr;
		u64		offset;
	} target;
	struct ib_pd	       *pd;
	unsigned int		page_shift;
	unsigned int		npages;
	u32			length;
	int			access_flags;
	u32			mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}

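/*
 * A sketch of the intended use, assuming the UMR completion handler
 * stores the work completion status into the context before completing
 * it (the wait/poll details live in mr.c):
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
 *	... post the MLX5_IB_WR_UMR work request ...
 *	wait_for_completion(&umr_context.done);
 *	if (umr_context.status != IB_WC_SUCCESS)
 *		... the UMR operation failed ...
 */
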
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char                    name[4];
	u32                     order;
	u32			size;
	u32                     cur;
	u32                     miss;
	u32			limit;

	struct dentry          *fsize;
	struct dentry          *fcur;
	struct dentry          *fmiss;
	struct dentry          *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct      mr_srcu;
#endif
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
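
/*
 * With MLX5_MAX_UMR_SHIFT of 16, a single UMR work request can address
 * up to 1 << 16 = 65536 pages, i.e. 256 MB of memory with 4 KB pages.
 */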

#endif /* MLX5_IB_H */