#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE			256
#define RDS_FMR_POOL_SIZE		4096

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_ib_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
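/*
 * Worked example (assuming the 4 KiB RDS_FRAG_SIZE from rds.h): with
 * 16 KiB pages, RDS_PAGE_LAST_OFF = (16384 / 4096 - 1) * 4096 = 12288,
 * i.e. the last full-sized fragment begins three frags into the page;
 * with 4 KiB pages it is simply 0.
 */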
struct rds_page_frag {
	struct list_head	f_item;
	struct page		*f_page;
	unsigned long		f_offset;
	dma_addr_t		f_mapped;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct rds_incoming	ii_inc;
};

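/*
 * Descriptive note: this structure is exchanged as RDMA CM private_data
 * during connection setup (see rds_ib_cm_handle_connect() declared below),
 * which is why the layout is append-only and the fields use wire-endian
 * (__be) types.
 */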
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void			*s_op;
	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

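/*
 * Descriptive note (inferred from the field types; see ib_ring.c for the
 * authoritative logic): w_alloc_ptr/w_alloc_ctr account for entries handed
 * out on the post path, while w_free_ptr and w_free_ctr are advanced as
 * completions return entries; only the free counter needs to be atomic
 * because it is updated from completion context.
 */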
struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
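
/*
 * Illustrative sketch only, not part of the RDS API: one way a sender
 * could atomically claim a send credit from the packed counter, in the
 * spirit of the comment in struct rds_ib_connection above. The function
 * name is made up for this example; the real logic lives in
 * rds_ib_send_grab_credits() (ib_send.c).
 */
static inline int rds_ib_example_take_send_credit(atomic_t *credits)
{
	int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) == 0)
			return 0;	/* out of send credits */
		/* send credits live in the low 16 bits, so -1 is safe here */
		newval = oldval - 1;
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return 1;
}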

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_ib_mr_pool	*mr_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_fmrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
};

#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
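/*
 * Descriptive note: ACK-only sends are posted with this wr_id so the send
 * completion path can tell them apart from ring entries; ~0 cannot collide
 * with a valid ring index.
 */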

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_tx_cq_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_call;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_alloc;
	uint64_t	s_ib_rdma_mr_free;
	uint64_t	s_ib_rdma_mr_used;
	uint64_t	s_ib_rdma_mr_pool_flush;
	uint64_t	s_ib_rdma_mr_pool_wait;
	uint64_t	s_ib_rdma_mr_pool_depleted;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
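
/*
 * Minimal usage sketch (hypothetical call site, not code from this file):
 * sync a device-written scatterlist before the CPU reads it, then hand it
 * back to the device:
 *
 *	ib_dma_sync_sg_for_cpu(dev, sg, sg_dma_len, DMA_FROM_DEVICE);
 *	... CPU parses the received data ...
 *	ib_dma_sync_sg_for_device(dev, sg, sg_dma_len, DMA_FROM_DEVICE);
 */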


/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int __init rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_ib_destroy_nodev_conns(void)
{
	__rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
}
static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
{
	__rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);

/* ib_recv.c */
int __init rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int __init rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

#endif