#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE			256
#define RDS_FMR_POOL_SIZE		8192

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head *percpu;
	struct list_head	 *xfer;
	struct list_head	 *ready;
};
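
/*
 * A sketch of the intended recycle flow, inferred from the structures
 * above (ib_recv.c holds the authoritative logic): freed frags and incs
 * are pushed onto the running CPU's rds_ib_cache_head list; once that
 * list has batched up RDS_IB_RECYCLE_BATCH_COUNT entries, the batch is
 * spliced onto the shared ->xfer list, and the refill path later moves
 * ->xfer to ->ready and reuses entries from there instead of allocating
 * fresh ones.
 */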

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void			*s_op;
	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
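
/*
 * Illustrative sketch of claiming one send credit with the macros
 * above; this is an assumed usage pattern, not a copy of the real
 * consumer (see rds_ib_send_grab_credits() below):
 *
 *	u32 oldval, newval;
 * again:
 *	oldval = newval = atomic_read(&ic->i_credits);
 *	if (IB_GET_SEND_CREDITS(oldval) == 0)
 *		return 0;	(no credit; the caller backs off)
 *	newval -= IB_SET_SEND_CREDITS(1);
 *	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
 *		goto again;	(raced with another updater; retry)
 */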

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_ib_mr_pool	*mr_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_fmrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;
	struct work_struct	free_work;
};

#define pcidev_to_node(pcidev)	pcibus_to_node(pcidev->bus)
#define ibdev_to_node(ibdev)	pcidev_to_node(to_pci_dev(ibdev->dma_device))
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
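
/*
 * Example (illustrative; 'ic' and 'gfp' are hypothetical locals) of
 * using these helpers to keep per-connection state on the IB device's
 * home NUMA node:
 *
 *	ic = kzalloc_node(sizeof(struct rds_ib_connection), gfp,
 *			  rdsibdev_to_node(rds_ibdev));
 */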

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_tx_cq_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_call;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_alloc;
	uint64_t	s_ib_rdma_mr_free;
	uint64_t	s_ib_rdma_mr_used;
	uint64_t	s_ib_rdma_mr_pool_flush;
	uint64_t	s_ib_rdma_mr_pool_wait;
	uint64_t	s_ib_rdma_mr_pool_depleted;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sg,
					      unsigned int sg_dma_len,
					      int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, &sg[i]),
					   ib_sg_dma_len(dev, &sg[i]),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sg,
						 unsigned int sg_dma_len,
						 int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, &sg[i]),
					      ib_sg_dma_len(dev, &sg[i]),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
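
/*
 * Example (illustrative) use of the wrappers above: sync a receive
 * fragment back to the CPU before reading it, then hand it back to the
 * device afterwards:
 *
 *	ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, &frag->f_sg, 1,
 *			       DMA_FROM_DEVICE);
 *	(copy out of sg_page(&frag->f_sg) here)
 *	ib_dma_sync_sg_for_device(ic->i_cm_id->device, &frag->f_sg, 1,
 *				  DMA_FROM_DEVICE);
 */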


/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
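
/*
 * Example (illustrative; the message and 'ret' are hypothetical):
 *
 *	rds_ib_conn_error(conn, "failed to post send wr, err %d\n", ret);
 */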

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int rds_ib_fmr_init(void);
void rds_ib_fmr_exit(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
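
/*
 * A sketch of the send-side life cycle of the ring, assumed from the
 * API above (not the exact code in ib_send.c):
 *
 *	u32 pos;
 *	u32 got = rds_ib_ring_alloc(&ic->i_send_ring, wanted, &pos);
 *	if (got == 0)
 *		(ring full; wait for send completions)
 *	(build and post 'got' work requests from ic->i_sends[pos] onward)
 *	(if the post fails: rds_ib_ring_unalloc(&ic->i_send_ring, got))
 *	(on completion:     rds_ib_ring_free(&ic->i_send_ring, completed))
 */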

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
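
/* Example (illustrative): rds_ib_stats_inc(s_ib_tx_ring_full); */
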
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

#endif