#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_1M_POOL_SIZE		(8192 / 2)
#define RDS_FMR_1M_MSG_SIZE		256
#define RDS_FMR_8K_MSG_SIZE		2
#define RDS_MR_8K_SCALE			(256 / (RDS_FMR_8K_MSG_SIZE + 1))
#define RDS_FMR_8K_POOL_SIZE		(RDS_MR_8K_SCALE * (8192 / 2))

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32
#define RDS_IB_SEND_OP			BIT_ULL(63)

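/*
 * Illustrative note (an inference, not a comment from the original
 * header): the send path appears to tag its wr_ids with RDS_IB_SEND_OP
 * so that send and recv completions can be told apart when a work
 * completion is examined, e.g. ("pos" being a hypothetical ring index):
 *
 *	send->s_wr.wr_id = pos | RDS_IB_SEND_OP;
 */
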
extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};
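
/*
 * Sketch of the recv layout (an inference from RDS_IB_RECV_SGE, not a
 * comment from the original source): each posted recv uses two SGEs,
 * r_sge[0] for the rds_header and r_sge[1] for the data fragment held
 * in r_frag->f_sg.
 */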

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};
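
/*
 * A guess at the intent, for orientation (not original source text):
 * the posting side advances w_alloc_ptr/w_alloc_ctr while completion
 * handlers retire entries through the atomic w_free_ctr, letting the
 * two sides run without sharing a lock.
 */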

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
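
/*
 * Usage sketch (an illustration, not original source; "ic" and "wc"
 * are hypothetical locals): a caller drains receive completions into
 * the state and applies the accumulated result in one shot, e.g.
 *
 *	struct rds_ib_ack_state state = { 0 };
 *
 *	rds_ib_recv_cqe_handler(ic, wc, &state);
 *	if (state.ack_next_valid)
 *		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
 */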

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
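
/*
 * Worked example (added for clarity, not original source): a value of
 * 0x00040010 in i_credits decodes as IB_GET_SEND_CREDITS() == 16 send
 * credits and IB_GET_POST_CREDITS() == 4 posted recv credits, and
 * IB_SET_SEND_CREDITS(16) | IB_SET_POST_CREDITS(4) rebuilds that word.
 */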

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	unsigned int		max_fmrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_fmrs;
	unsigned int		max_1m_fmrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;
	struct work_struct	free_work;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
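
/*
 * Usage sketch (illustrative; "ibmr" is a hypothetical local): these
 * helpers let per-device allocations land on the HCA's NUMA node, e.g.
 *
 *	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
 *			    rdsibdev_to_node(rds_ibdev));
 */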

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
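
/*
 * Sketch (an assumption about the send completion path, not original
 * source): ACK sends carry the magic wr_id instead of a ring slot, so
 * a handler can recognize them up front, e.g.
 *
 *	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
 *		rds_ib_ack_send_complete(ic);
 *		return;
 *	}
 */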

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
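
/*
 * Usage sketch (illustrative only; the mapped table and direction are
 * assumptions): callers sync a DMA-mapped scatterlist before the CPU
 * touches it, e.g.
 *
 *	ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
 *			       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
 */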

/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_fmr_1m_pool_size;
extern unsigned int rds_ib_fmr_8k_pool_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
					     int npages);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int rds_ib_fmr_init(void);
void rds_ib_fmr_exit(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif