#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE			256
#define RDS_FMR_POOL_SIZE		8192

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head f_item;
	struct list_head f_cache_entry;
	struct scatterlist f_sg;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct list_head ii_cache_entry;
	struct rds_incoming ii_inc;
};

struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

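/*
 * Recycle caches for incs and frags (see ib_recv.c): entries freed on a
 * given cpu are parked on that cpu's percpu list, moved in batches onto
 * the xfer list, and handed out again from ready on the refill path.
 */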
struct rds_ib_refill_cache {
	struct rds_ib_cache_head *percpu;
	struct list_head *xfer;
	struct list_head *ready;
};

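/*
 * The connect private data is exchanged between peers through the
 * rdma_cm connection request/response, so its layout is wire format.
 */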
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask; /* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit; /* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void *s_op;
	struct ib_send_wr s_wr;
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

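/*
 * Ring bookkeeping (see ib_ring.c): w_alloc_ptr/w_alloc_ctr advance as
 * work requests are posted, w_free_ptr/w_free_ctr as completions retire
 * them; w_free_ctr is atomic so the completion handler can free entries
 * without taking a lock.
 */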
struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_mr *i_mr;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header *i_send_hdrs;
	u64 i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t i_signaled_sends;

	/* rx */
	struct tasklet_struct i_recv_tasklet;
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	u64 i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64 i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;		/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	u64 i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)

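/*
 * Worked example (hypothetical values): i_credits == 0x00200010 decodes
 * to IB_GET_SEND_CREDITS() == 16 send credits and IB_GET_POST_CREDITS()
 * == 32 posted recv credits.  Both halves can be updated in one
 * lock-free step, e.g.:
 *
 *	atomic_add(IB_SET_SEND_CREDITS(5) | IB_SET_POST_CREDITS(1),
 *		   &ic->i_credits);
 */
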
struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct rds_ib_mr_pool *mr_pool;
	unsigned int fmr_max_remaps;
	unsigned int max_fmrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock;	/* protect the above */
	atomic_t refcount;
	struct work_struct free_work;
};

#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
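/*
 * Send completions whose wr_id equals RDS_IB_ACK_WR_ID belong to the
 * connection's dedicated i_ack_wr rather than to a send ring slot.
 */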

struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_tx_cq_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_call;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_alloc;
	uint64_t s_ib_rdma_mr_free;
	uint64_t s_ib_rdma_mr_used;
	uint64_t s_ib_rdma_mr_pool_flush;
	uint64_t s_ib_rdma_mr_pool_wait;
	uint64_t s_ib_rdma_mr_pool_depleted;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
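
/*
 * Illustrative use (mirroring the recv path): sync one mapped fragment
 * before the CPU copies it out:
 *
 *	ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, &frag->f_sg, 1,
 *			       DMA_FROM_DEVICE);
 */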

/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
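
/* Illustrative use: rds_ib_conn_error(conn, "recv post failed (%d)\n", ret); */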

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int rds_ib_fmr_init(void);
void rds_ib_fmr_exit(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
char *rds_ib_wc_status_str(enum ib_wc_status status);
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
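/* e.g. rds_ib_stats_inc(s_ib_tx_ring_full); */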
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

#endif