/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2

#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
#define RDS_IB_DEFAULT_FR_WR 256
#define RDS_IB_DEFAULT_FR_INV_WR 256

#define RDS_IB_DEFAULT_RETRY_COUNT 1

#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT 32

#define RDS_IB_WC_MAX 32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head f_item;
	struct list_head f_cache_entry;
	struct scatterlist f_sg;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct list_head ii_cache_entry;
	struct rds_incoming ii_inc;
};

struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head *xfer;
	struct list_head *ready;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask; /* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit; /* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void *s_op;
	union {
		struct ib_send_wr s_wr;
		struct ib_rdma_wr s_rdma_wr;
		struct ib_atomic_wr s_atomic_wr;
	};
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8-byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64 ack_next;
	u64 ack_recv;
	unsigned int ack_required:1;
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;
	struct ib_wc i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t i_fastreg_wrs;
	atomic_t i_fastunreg_wrs;

	/* interrupt handling */
	struct tasklet_struct i_send_tasklet;
	struct tasklet_struct i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header *i_send_hdrs;
	dma_addr_t i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t i_signaled_sends;

	/* rx */
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	dma_addr_t i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64 i_ack_recv; /* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t i_cache_allocs;

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;		/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	dma_addr_t i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1; /* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool i_active_side;
	atomic_t i_cq_quiesce;

	/* Send/Recv vectors */
	int i_scq_vector;
	int i_rcq_vector;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)

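/*
 * Illustrative sketch (editor addition, not part of the original header),
 * assuming the layout implied by the macros above: send credits sit in the
 * low 16 bits of i_credits and posted-recv credits in the high 16 bits, so
 * both counters can be read or updated together as one atomic word. The
 * helper names below are hypothetical and exist only for this example.
 */
static inline u32 rds_ib_example_pack_credits(u32 send, u32 posted)
{
	/* combine both counters into the single 32-bit credit word */
	return IB_SET_SEND_CREDITS(send) | IB_SET_POST_CREDITS(posted);
}

static inline void rds_ib_example_unpack_credits(u32 credits,
						 u32 *send, u32 *posted)
{
	/* split the credit word back into its two 16-bit counters */
	*send = IB_GET_SEND_CREDITS(credits);
	*posted = IB_GET_POST_CREDITS(credits);
}
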
struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
	struct rcu_head rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	bool use_fastreg;

	unsigned int max_mrs;
	struct rds_ib_mr_pool *mr_1m_pool;
	struct rds_ib_mr_pool *mr_8k_pool;
	unsigned int fmr_max_remaps;
	unsigned int max_8k_mrs;
	unsigned int max_1m_mrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock; /* protect the above */
	refcount_t refcount;
	struct work_struct free_work;
	int *vector_load;
};

#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1

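/*
 * Illustrative note (editor addition): IB_ACK_IN_FLIGHT and IB_ACK_REQUESTED
 * are bit numbers, not masks, so they are intended for the atomic bitops on
 * i_ack_flags, e.g. (hypothetical call sites):
 *
 *	set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 *	if (test_and_clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags))
 *		...;
 */
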
/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID (~(u64) 0)

struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_evt_handler_call;
	uint64_t s_ib_tasklet_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_total_frags;
	uint64_t s_ib_rx_total_incs;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_8k_alloc;
	uint64_t s_ib_rdma_mr_8k_free;
	uint64_t s_ib_rdma_mr_8k_used;
	uint64_t s_ib_rdma_mr_8k_pool_flush;
	uint64_t s_ib_rdma_mr_8k_pool_wait;
	uint64_t s_ib_rdma_mr_8k_pool_depleted;
	uint64_t s_ib_rdma_mr_1m_alloc;
	uint64_t s_ib_rdma_mr_1m_free;
	uint64_t s_ib_rdma_mr_1m_used;
	uint64_t s_ib_rdma_mr_1m_pool_flush;
	uint64_t s_ib_rdma_mr_1m_pool_wait;
	uint64_t s_ib_rdma_mr_1m_pool_depleted;
	uint64_t s_ib_rdma_mr_8k_reused;
	uint64_t s_ib_rdma_mr_1m_reused;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
	uint64_t s_ib_recv_added_to_cache;
	uint64_t s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device

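/*
 * Illustrative usage sketch (editor addition, not part of the original
 * header): with the wrappers above in place, a caller syncs a mapped
 * scatterlist around CPU access just as it would with the generic
 * ib_dma_sync_sg_for_{cpu,device} helpers. This hypothetical helper only
 * demonstrates the calling convention for a single receive fragment.
 */
static inline void rds_ib_example_sync_frag(struct ib_device *dev,
					    struct rds_page_frag *frag)
{
	/* make the HCA's writes to the fragment visible to the CPU ... */
	ib_dma_sync_sg_for_cpu(dev, &frag->f_sg, 1, DMA_FROM_DEVICE);
	/* ... and hand the buffer back to the device afterwards */
	ib_dma_sync_sg_for_device(dev, &frag->f_sg, 1, DMA_FROM_DEVICE);
}
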
/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

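/*
 * Illustrative usage sketch (editor addition): the wrapper above prepends the
 * severity and the "RDS/IB: " prefix, so a call site supplies only the
 * connection and a printf-style message, e.g. (hypothetical):
 *
 *	rds_ib_conn_error(conn, "send completion had status %u\n", wc->status);
 */
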
/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
	rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

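/*
 * Illustrative usage sketch (editor addition): the macros above bump the
 * per-CPU rds_ib_stats counters by field name, e.g. (hypothetical call
 * sites, where nr_frags is a local count):
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 *	rds_ib_stats_add(s_ib_rx_total_frags, nr_frags);
 */
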
/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif