/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
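
/*
 * Illustrative use only (values hypothetical): these wrappers are called
 * like the pr_*() helpers they wrap, with the device first, e.g.
 *
 *	mlx5_ib_warn(dev, "create QP failed, err %d\n", err);
 *
 * and prefix the message with the IB device name, function, source line
 * and calling PID.
 */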

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
#define MLX5_IB_DEFAULT_UIDX	0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK	__mlx5_mask(qpc, user_index)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
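
/*
 * The mmap offset (vm_pgoff) carries one of the commands above in the
 * bits at and above MLX5_IB_MMAP_CMD_SHIFT. A minimal decode sketch,
 * with an assumed helper name:
 *
 *	static u8 mmap_get_command(unsigned long offset)
 *	{
 *		return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *	}
 */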

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	unsigned int			prio;
	struct mlx5_flow_rule		*rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	/* Protect the flow steering bypass flow tables when adding or
	 * deleting flow rules; only a single add/removal of a flow
	 * steering rule may be in flight at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS		IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
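
/*
 * Note: the mapping above relies on the enum order matching the
 * MLX5_PFAULT_REQUESTOR and MLX5_PFAULT_WRITE flag bits, so masking the
 * fault flags yields the context index directly; e.g. a requestor write
 * fault selects MLX5_IB_PAGEFAULT_REQUESTOR_WRITE.
 */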

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and that shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};
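
/*
 * A sketch (not the driver's literal code) of the check-and-schedule
 * pattern the disable_page_faults_lock comment above describes:
 *
 *	spin_lock(&qp->disable_page_faults_lock);
 *	if (!qp->disable_page_faults)
 *		queue_work(mlx5_ib_page_fault_wq, &pfault->work);
 *	spin_unlock(&qp->disable_page_faults_lock);
 */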

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem	       *umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc		wc;
	struct list_head	list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
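
/*
 * A usage sketch, under assumed local names: the semaphore bounds how
 * many UMR work requests may be outstanding on the shared UMR QP, so a
 * sender takes a slot before posting and releases it on completion.
 *
 *	down(&dev->umrc.sem);
 *	err = ib_post_send(dev->umrc.qp, &umrwr.wr, &bad);
 *	... wait for the UMR completion ...
 *	up(&dev->umrc.sem);
 */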

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp	*gsi;
	struct work_struct	pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};
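
/*
 * The locking pattern the netdev_lock comment describes, roughly as
 * mlx5_ib_get_netdev() applies it: hold the read lock so the netdev
 * pointer cannot be cleared between the NULL check and dev_hold().
 *
 *	read_lock(&ibdev->roce.netdev_lock);
 *	ndev = ibdev->roce.netdev;
 *	if (ndev)
 *		dev_hold(ndev);
 *	read_unlock(&ibdev->roce.netdev_lock);
 */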
556
Eli Cohene126ba92013-07-07 17:25:49 +0300557struct mlx5_ib_dev {
558 struct ib_device ib_dev;
Jack Morgenstein9603b612014-07-28 23:30:22 +0300559 struct mlx5_core_dev *mdev;
Achiad Shochatfc24fc52015-12-23 18:47:17 +0200560 struct mlx5_roce roce;
Eli Cohene126ba92013-07-07 17:25:49 +0300561 MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
Eli Cohene126ba92013-07-07 17:25:49 +0300562 int num_ports;
Eli Cohene126ba92013-07-07 17:25:49 +0300563 /* serialize update of capability mask
564 */
565 struct mutex cap_mask_mutex;
566 bool ib_active;
567 struct umr_common umrc;
568 /* sync used page count stats
569 */
Eli Cohene126ba92013-07-07 17:25:49 +0300570 struct mlx5_ib_resources devr;
571 struct mlx5_mr_cache cache;
Eli Cohen746b5582013-10-23 09:53:14 +0300572 struct timer_list delay_timer;
573 int fill_delay;
Haggai Eran8cdd3122014-12-11 17:04:20 +0200574#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
575 struct ib_odp_caps odp_caps;
Haggai Eran6aec21f2014-12-11 17:04:23 +0200576 /*
577 * Sleepable RCU that prevents destruction of MRs while they are still
578 * being used by a page fault handler.
579 */
580 struct srcu_struct mr_srcu;
Haggai Eran8cdd3122014-12-11 17:04:20 +0200581#endif
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200582 struct mlx5_ib_flow_db flow_db;
Eli Cohene126ba92013-07-07 17:25:49 +0300583};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

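/* The mlx5_core_qp is embedded in mlx5_ib_qp_base rather than directly in
 * mlx5_ib_qp (the base may belong to a transport QP or to the SQ/RQ of a
 * raw packet QP), so recover the owning QP through the base's
 * container_mibqp back-pointer instead of a direct container_of().
 */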
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
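
/*
 * For example (an illustrative call, not taken from the driver):
 * convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ) yields
 * MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ;
 * local read permission is always granted.
 */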

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags;
	 * otherwise return zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
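
/*
 * Illustrative caller of verify_assign_uidx() (the "ucmd" struct is
 * hypothetical): with CQE version 1 the user-supplied index is
 * validated and used; with version 0 the default index is assigned.
 *
 *	err = verify_assign_uidx(context->cqe_version, ucmd.uidx, &uidx);
 *	if (err)
 *		return err;
 */
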
#endif /* MLX5_IB_H */