/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
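
/*
 * Usage sketch (illustrative call sites, not defined here): the wrappers
 * above prefix every message with device name, function, line and pid,
 * so callers pass only the format string and its arguments, e.g.:
 *
 *	mlx5_ib_dbg(dev, "alloc ucontext\n");
 *	mlx5_ib_warn(dev, "create qp failed, err %d\n", err);
 */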

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
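
/*
 * field_avail() example (hypothetical response struct): before copying
 * an optional trailing field to user space, check that the caller's
 * buffer is long enough to hold it:
 *
 *	if (field_avail(typeof(resp), cqe_version, udata->outlen))
 *		resp.cqe_version = context->cqe_version;
 */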

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
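
/*
 * Layout sketch (an assumption based on the shift/mask pair above): the
 * mmap command travels in vma->vm_pgoff, with the command in the bits
 * above MLX5_IB_MMAP_CMD_SHIFT and a per-command argument (e.g. a UAR
 * index) in the low bits:
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *	      MLX5_IB_MMAP_CMD_MASK;
 *	idx = vma->vm_pgoff & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 */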

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
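
/*
 * Note (added for clarity, assuming MLX5_PFAULT_REQUESTOR and
 * MLX5_PFAULT_WRITE are the two low flag bits): masking them out of
 * pagefault->flags yields a value in [0, 3] that indexes the four
 * contexts in the enum above directly.
 */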

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_ib_qp_trans	trans_qp;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
	MLX5_IB_QP_CROSS_CHANNEL		= 1 << 2,
	MLX5_IB_QP_MANAGED_SEND			= 1 << 3,
	MLX5_IB_QP_MANAGED_RECV			= 1 << 4,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
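
/*
 * Usage pattern (sketch): a UMR work request wraps the generic
 * ib_send_wr, so a post-send path that sees the MLX5_IB_WR_UMR opcode
 * can recover the full request with the umr_wr() accessor:
 *
 *	struct mlx5_umr_wr umrwr = {};
 *
 *	umrwr.wr.opcode = MLX5_IB_WR_UMR;
 *	umrwr.mkey      = mr->mmr.key;
 *	...
 *	struct mlx5_umr_wr *umr = umr_wr(&umrwr.wr);
 */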

struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem	       *umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	u32			create_flags;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void		       *descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void		       *descs_alloc;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}
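
/*
 * Wait pattern (sketch, assuming the UMR CQ handler sets ->status and
 * completes ->done when the work request finishes):
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	... post a UMR work request whose wr_id refers to umr_context ...
 *	wait_for_completion(&umr_context.done);
 *	if (umr_context.status != IB_WC_SUCCESS)
 *		err = -EFAULT;
 */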

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;


	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry	       *root;
	unsigned long		last_add;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device      *netdev;
	struct notifier_block	nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev	       *mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}
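
/*
 * Note (added for clarity): mlx5_core_qp is embedded in mlx5_ib_qp_base
 * rather than directly in mlx5_ib_qp, so the lookup above resolves the
 * base first and then follows its container_mibqp back-pointer.
 */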

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
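
/*
 * Example (derived from the mapping above): an MR registered with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ converts to
 * MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ;
 * local read permission is always granted.
 */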

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ
	 * create flags; return zero otherwise.
	 */
	return (flags & ~IB_CQ_FLAGS_IGNORE_OVERRUN);
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
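
/*
 * Usage sketch (hypothetical caller): with CQE version 1 negotiated on
 * the ucontext, a user-supplied index is validated against the mask and
 * passed through; with version 0 the default index is forced:
 *
 *	err = verify_assign_uidx(ucontext->cqe_version, ucmd.uidx, &uidx);
 *	if (err)
 *		return err;
 */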
#endif /* MLX5_IB_H */