/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */

#ifndef SMC_WR_H
#define SMC_WR_H

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_core.h"

#define SMC_WR_MAX_CQE 32768	/* max. # of completion queue elements */
#define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */

#define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)	/* max. wait for a free tx slot */
#define SMC_WR_TX_WAIT_PENDING_TIME	(5 * HZ)	/* max. wait for pending tx WRs */

#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */

#define SMC_WR_TX_PEND_PRIV_SIZE 32	/* size of per-send opaque context */

/* opaque context area for a pending send, interpreted by its tx handler */
struct smc_wr_tx_pend_priv {
	u8			priv[SMC_WR_TX_PEND_PRIV_SIZE];
};

typedef void (*smc_wr_tx_handler)(struct smc_wr_tx_pend_priv *,
				  struct smc_link *,
				  enum ib_wc_status);

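/* A minimal tx completion handler, as an illustrative sketch (the names
 * below are hypothetical, not taken from an actual caller):
 *
 *	static void my_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 *				  struct smc_link *link,
 *				  enum ib_wc_status wc_status)
 *	{
 *		...cast pnd_snd to the caller's private struct (which must
 *		   fit within SMC_WR_TX_PEND_PRIV_SIZE) and finish
 *		   bookkeeping for the completed send, checking wc_status
 *		   for errors...
 *	}
 */
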
typedef bool (*smc_wr_tx_filter)(struct smc_wr_tx_pend_priv *,
				 unsigned long);

typedef void (*smc_wr_tx_dismisser)(struct smc_wr_tx_pend_priv *);

struct smc_wr_rx_handler {
	struct hlist_node	list;	/* hash table collision resolution */
	void			(*handler)(struct ib_wc *, void *);
	u8			type;
};

/* Only used by RDMA write WRs.
 * All other WRs (CDC/LLC) use smc_wr_tx_send(), which handles the WR_ID
 * implicitly.
 */
static inline long smc_wr_tx_get_next_wr_id(struct smc_link *link)
{
	return atomic_long_inc_return(&link->wr_tx_id);
}

static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
{
	atomic_long_set(wr_tx_id, val);
}

/* post a new receive work request to fill a completed old work request entry */
static inline int smc_wr_rx_post(struct smc_link *link)
{
	struct ib_recv_wr *bad_recv_wr = NULL;
	int rc;
	u64 wr_id, temp_wr_id;
	u32 index;

	wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */
	temp_wr_id = wr_id;
	/* do_div() returns the remainder, i.e. the index of the receive
	 * ring buffer this wr_id maps to
	 */
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	link->wr_rx_ibs[index].wr_id = wr_id;
	rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], &bad_recv_wr);
	return rc;
}

int smc_wr_create_link(struct smc_link *lnk);
int smc_wr_alloc_link_mem(struct smc_link *lnk);
void smc_wr_free_link(struct smc_link *lnk);
void smc_wr_free_link_mem(struct smc_link *lnk);
void smc_wr_remember_qp_attr(struct smc_link *lnk);
void smc_wr_remove_dev(struct smc_ib_device *smcibdev);
void smc_wr_add_dev(struct smc_ib_device *smcibdev);

int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv);
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv);
int smc_wr_tx_send(struct smc_link *link,
		   struct smc_wr_tx_pend_priv *wr_pend_priv);
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
			   smc_wr_tx_filter filter, unsigned long data);
void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data);
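
/* Typical send path, as a hedged sketch (error handling and message
 * construction are caller-specific; my_tx_handler is a hypothetical
 * smc_wr_tx_handler):
 *
 *	struct smc_wr_tx_pend_priv *pend;
 *	struct smc_wr_buf *wr_buf;
 *	int rc;
 *
 *	rc = smc_wr_tx_get_free_slot(link, my_tx_handler, &wr_buf, &pend);
 *	if (rc)
 *		return rc;
 *	...build the control message in *wr_buf...
 *	rc = smc_wr_tx_send(link, pend);
 *
 * my_tx_handler() is invoked once the corresponding work completion
 * arrives; a caller that gives up before sending releases the slot with
 * smc_wr_tx_put_slot() instead.
 */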

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
int smc_wr_rx_post_init(struct smc_link *link);
void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
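
/* Registering a receive handler for one message type, as an illustrative
 * sketch (my_rx_handler and MY_MSG_TYPE are hypothetical; the type field
 * selects which incoming messages this handler receives):
 *
 *	static void my_rx_handler(struct ib_wc *wc, void *buf)
 *	{
 *		...process the received message in buf...
 *	}
 *
 *	static struct smc_wr_rx_handler my_handler = {
 *		.handler	= my_rx_handler,
 *		.type		= MY_MSG_TYPE,
 *	};
 *
 *	rc = smc_wr_rx_register_handler(&my_handler);
 */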

#endif /* SMC_WR_H */