#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};

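/*
 * Illustrative sketch (an assumption, not part of this header): how a
 * fragment might be returned to the per-cpu refill cache above.  This
 * mirrors the shape of the recv-path cache code; the batched hand-off of
 * RDS_IB_RECYCLE_BATCH_COUNT entries to the ->ready list is elided, and
 * the helper name is hypothetical.
 */
static inline void rds_ib_example_cache_put(struct rds_ib_refill_cache *cache,
					    struct list_head *new_item)
{
	struct list_head *first;

	local_irq_disable();	/* the head is only safe to touch on this cpu */
	first = __this_cpu_read(cache->percpu->first);
	if (!first)
		INIT_LIST_HEAD(new_item);	/* start a new circular list */
	else
		list_add_tail(new_item, first);
	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);
	local_irq_enable();
}
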
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask;	/* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

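/*
 * Illustrative sketch (assumption): picking the highest minor protocol
 * version shared with a peer from dp_protocol_minor_mask and
 * RDS_IB_SUPPORTED_PROTOCOLS, roughly what the CM handshake has to do.
 * The helper name is hypothetical.
 */
static inline u8 rds_ib_example_common_minor(__be16 peer_minor_mask)
{
	u16 common = be16_to_cpu(peer_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;

	return common ? fls(common) : 0;	/* 0: no version in common */
}
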
struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8-byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

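/*
 * Illustrative sketch (assumption, helper name is hypothetical): a recv
 * completion loop could fold each completion's sequence number into the
 * ack state above, flushing a single ACK once the batch is drained.
 */
static inline void rds_ib_example_note_ack(struct rds_ib_ack_state *state,
					   u64 seq, int ack_required)
{
	if (!state->ack_next_valid || seq > state->ack_next) {
		state->ack_next = seq;	/* highest sequence seen in batch */
		state->ack_next_valid = 1;
	}
	if (ack_required)
		state->ack_required = 1;
}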

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work	*i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work	*i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)

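/*
 * Illustrative sketch (assumption, not part of this header): taking one
 * send credit out of i_credits with the macros above and a single
 * cmpxchg, as the comment in struct rds_ib_connection describes.  The
 * helper name is hypothetical and error handling is pared down.
 */
static inline int rds_ib_example_take_send_credit(struct rds_ib_connection *ic)
{
	unsigned int oldval, newval;

	do {
		oldval = atomic_read(&ic->i_credits);
		if (IB_GET_SEND_CREDITS(oldval) == 0)
			return 0;	/* throttle: no send credits left */
		newval = oldval - IB_SET_SEND_CREDITS(1);
	} while (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval);

	return 1;
}
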
struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;
	struct work_struct	free_work;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

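/*
 * Illustrative sketch (assumption): how the recv path might record the
 * next ACK to send using i_ack_next and the i_ack_flags bits above.
 * Only the KERNEL_HAS_ATOMIC64 layout is shown; the helper name is
 * hypothetical.
 */
#ifdef KERNEL_HAS_ATOMIC64
static inline void rds_ib_example_set_ack(struct rds_ib_connection *ic,
					  u64 seq, int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);	/* latest sequence to ack */
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}
#endif
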
struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

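/*
 * Illustrative sketch (assumption): the usual shape of a caller of the
 * ring helpers above - reserve slots before posting work requests and
 * hand them back if the post fails.  The function name is hypothetical.
 */
static inline int rds_ib_example_post(struct rds_ib_work_ring *ring, u32 wanted)
{
	u32 pos;
	u32 got = rds_ib_ring_alloc(ring, wanted, &pos);

	if (got == 0)
		return -ENOMEM;	/* ring full; caller backs off and retries */
	/* ... build and post 'got' work requests starting at slot 'pos',
	 * calling rds_ib_ring_unalloc(ring, got) if the post fails ...
	 */
	return 0;
}
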
/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif