/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256
#define RDS_IB_DEFAULT_FR_WR		256
#define RDS_IB_DEFAULT_FR_INV_WR	256

#define RDS_IB_DEFAULT_RETRY_COUNT	1

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head f_item;
	struct list_head f_cache_entry;
	struct scatterlist f_sg;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct list_head ii_cache_entry;
	struct rds_incoming ii_inc;
};

struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head *xfer;
	struct list_head *ready;
};
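
/*
 * Illustrative note, not part of the original header: the recycling code
 * lives in ib_recv.c.  Freed frags and incs are pushed onto per-cpu lists
 * hanging off @percpu, moved in batches of RDS_IB_RECYCLE_BATCH_COUNT to
 * the @xfer list, and finally consumed from @ready when the recv ring is
 * refilled, so the common free/alloc path never takes a shared lock.
 */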

/* This is the common structure for the IB private data exchange used in
 * setting up an RDS connection.  The exchange differs for IPv4 and IPv6
 * connections because the address sizes differ and the addresses sit at
 * the beginning of the structure, so a single layout cannot interoperate
 * across both families.
 */
struct rds_ib_conn_priv_cmn {
	u8 ricpc_protocol_major;
	u8 ricpc_protocol_minor;
	__be16 ricpc_protocol_minor_mask;	/* bitmask */
	__be32 ricpc_reserved1;
	__be64 ricpc_ack_seq;
	__be32 ricpc_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	struct rds_ib_conn_priv_cmn dp_cmn;
};

struct rds6_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	struct in6_addr dp_saddr;
	struct in6_addr dp_daddr;
	struct rds_ib_conn_priv_cmn dp_cmn;
};

#define dp_protocol_major	dp_cmn.ricpc_protocol_major
#define dp_protocol_minor	dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask	dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq		dp_cmn.ricpc_ack_seq
#define dp_credit		dp_cmn.ricpc_credit

union rds_ib_conn_priv {
	struct rds_ib_connect_private ricp_v4;
	struct rds6_ib_connect_private ricp_v6;
};
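
/*
 * Illustrative sketch, not part of the original header: code that fills in
 * the CM private data picks the union member matching the connection's
 * address family (the real logic lives in ib_cm.c), roughly:
 *
 *	union rds_ib_conn_priv dp;
 *
 *	if (isv6)
 *		dp.ricp_v6.dp_saddr = conn->c_laddr;
 *	else
 *		dp.ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
 */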

struct rds_ib_send_work {
	void *s_op;
	union {
		struct ib_send_wr s_wr;
		struct ib_rdma_wr s_rdma_wr;
		struct ib_atomic_wr s_atomic_wr;
	};
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};
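
/*
 * Illustrative note, not part of the original header: the counters above
 * are free-running, so the number of in-flight entries is plain modular
 * arithmetic (see ib_ring.c for the real helpers), roughly:
 *
 *	u32 used = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
 *
 * Only w_free_ctr is atomic because completions retire entries from the
 * CQ handler while the send/recv paths allocate them.
 */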

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not
 * receiving garbage, and we can tell a small 8-byte fragment from an
 * ACK frame.
 */
struct rds_ib_ack_state {
	u64 ack_next;
	u64 ack_recv;
	unsigned int ack_required:1;
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};
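
/*
 * Illustrative note, not part of the original header: the recv completion
 * path accumulates the newest ACK to send and the newest ACK received
 * across one polling batch in this state, then applies them once via
 * rds_ib_set_ack() after the batch is drained (see ib_cm.c and ib_recv.c).
 */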


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;
	struct ib_wc i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t i_fastreg_wrs;
	atomic_t i_fastunreg_wrs;

	/* interrupt handling */
	struct tasklet_struct i_send_tasklet;
	struct tasklet_struct i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header *i_send_hdrs;
	dma_addr_t i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t i_signaled_sends;

	/* rx */
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	dma_addr_t i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64 i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t i_cache_allocs;

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;	/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	dma_addr_t i_ack_dma;
	unsigned long i_ack_queued;
	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool i_active_side;
	atomic_t i_cq_quiesce;

	/* Send/Recv vectors */
	int i_scq_vector;
	int i_rcq_vector;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
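
/*
 * Illustrative sketch, not part of the original header: because both
 * counters share one atomic_t, taking a send credit is a lock-free
 * cmpxchg loop on the packed word (the real version is
 * rds_ib_send_grab_credits() in ib_send.c).  Send credits occupy the
 * low 16 bits, so decrementing the word by one consumes one send credit:
 *
 *	u32 oldval, newval;
 *
 *	do {
 *		oldval = newval = atomic_read(&ic->i_credits);
 *		if (IB_GET_SEND_CREDITS(oldval) == 0)
 *			break;
 *		newval--;
 *	} while (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval);
 */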

struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
	struct rcu_head rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	bool use_fastreg;

	unsigned int max_mrs;
	struct rds_ib_mr_pool *mr_1m_pool;
	struct rds_ib_mr_pool *mr_8k_pool;
	unsigned int fmr_max_remaps;
	unsigned int max_8k_mrs;
	unsigned int max_1m_mrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock;	/* protect the above */
	refcount_t refcount;
	struct work_struct free_work;
	int *vector_load;
};

#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_evt_handler_call;
	uint64_t s_ib_tasklet_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_total_frags;
	uint64_t s_ib_rx_total_incs;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_8k_alloc;
	uint64_t s_ib_rdma_mr_8k_free;
	uint64_t s_ib_rdma_mr_8k_used;
	uint64_t s_ib_rdma_mr_8k_pool_flush;
	uint64_t s_ib_rdma_mr_8k_pool_wait;
	uint64_t s_ib_rdma_mr_8k_pool_depleted;
	uint64_t s_ib_rdma_mr_1m_alloc;
	uint64_t s_ib_rdma_mr_1m_free;
	uint64_t s_ib_rdma_mr_1m_used;
	uint64_t s_ib_rdma_mr_1m_pool_flush;
	uint64_t s_ib_rdma_mr_1m_pool_wait;
	uint64_t s_ib_rdma_mr_1m_pool_depleted;
	uint64_t s_ib_rdma_mr_8k_reused;
	uint64_t s_ib_rdma_mr_1m_reused;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
	uint64_t s_ib_recv_added_to_cache;
	uint64_t s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
	rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif