#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE			256
#define RDS_FMR_POOL_SIZE		4096

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE-sized fragments of pages to the receive queues
 * to try to minimize the amount of memory tied up in both the device
 * and socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
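
/*
 * For example (illustrative numbers only): with 4096-byte pages and
 * 2048-byte fragments, RDS_PAGE_LAST_OFF = ((4096 / 2048) - 1) * 2048
 * = 2048, i.e. the last full fragment begins halfway into the page.
 */
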
struct rds_page_frag {
	struct list_head	f_item;
	struct page		*f_page;
	unsigned long		f_offset;
	dma_addr_t		f_mapped;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct rds_incoming	ii_inc;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask;	/* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;		/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	struct rds_message	*s_rm;
	struct rds_rdma_op	*s_op;
	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rds_message	*i_rm;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them with a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
	long			i_unsignaled_bytes;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
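
/*
 * A minimal sketch of the cmpxchg update described in the i_credits
 * comment above (illustrative only; the real logic lives in ib_send.c).
 * 'ic' is assumed to be a struct rds_ib_connection *.  Consuming one
 * send credit leaves the posted-credit half of the word untouched:
 *
 *	int oldval, newval;
 *
 *	do {
 *		oldval = atomic_read(&ic->i_credits);
 *		if (IB_GET_SEND_CREDITS(oldval) == 0)
 *			break;
 *		newval = oldval - IB_SET_SEND_CREDITS(1);
 *	} while (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval);
 */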

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_ib_mr_pool	*mr_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_fmrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
};

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_tx_cq_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_call;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_alloc;
	uint64_t	s_ib_rdma_mr_free;
	uint64_t	s_ib_rdma_mr_used;
	uint64_t	s_ib_rdma_mr_pool_flush;
	uint64_t	s_ib_rdma_mr_pool_wait;
	uint64_t	s_ib_rdma_mr_pool_depleted;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
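
/*
 * Illustrative use of the shims above (a sketch, not code from this
 * file): before the CPU reads data the HCA has written into a mapped
 * receive scatterlist, sync it back:
 *
 *	ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, sg, sg_dma_len,
 *			       DMA_FROM_DEVICE);
 */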

/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int __init rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
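
/*
 * For example (illustrative only), failing the connection from a
 * completion handler:
 *
 *	rds_ib_conn_error(conn, "send completion had status %u\n",
 *			  wc.status);
 */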

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_ib_destroy_nodev_conns(void)
{
	__rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
}
static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
{
	__rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);

/* ib_recv.c */
int __init rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_ib_inc_purge(struct rds_incoming *inc);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
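
/*
 * For example (illustrative only): bump the per-cpu "ring full" counter
 * declared in struct rds_ib_statistics when the send ring has no free
 * entries:
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 */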

/* ib_sysctl.c */
int __init rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 *
 * From version 3.1 onwards, header is in front of data in the sge.
 */
static inline struct ib_sge *
rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
{
	if (ic->conn->c_version > RDS_PROTOCOL_3_0)
		return &sge[0];
	else
		return &sge[1];
}

static inline struct ib_sge *
rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
{
	if (ic->conn->c_version > RDS_PROTOCOL_3_0)
		return &sge[1];
	else
		return &sge[0];
}
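
/*
 * Illustrative use when building a send work request (a sketch only;
 * the actual setup lives in ib_send.c).  'send' is assumed to be a
 * struct rds_ib_send_work ring entry and 'pos' its index in the send
 * ring.  Each message carries one header SGE and one data SGE, and the
 * helpers above pick the slot matching the negotiated protocol version:
 *
 *	struct ib_sge *hdr = rds_ib_header_sge(ic, send->s_sge);
 *	struct ib_sge *data = rds_ib_data_sge(ic, send->s_sge);
 *
 *	hdr->addr   = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
 *	hdr->length = sizeof(struct rds_header);
 *	(data is then pointed at the payload scatterlist entry)
 */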

#endif /* _RDS_IB_H */