#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)
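
/*
 * Worked example (illustrative only): the macros above pack a version
 * into 16 bits, major in the high byte and minor in the low byte:
 *
 *	RDS_PROTOCOL(3, 1)         == 0x0301 == RDS_PROTOCOL_3_1
 *	RDS_PROTOCOL_MAJOR(0x0301) == 3
 *	RDS_PROTOCOL_MINOR(0x0301) == 1
 */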

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #		18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
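
/*
 * Worked example (illustrative only): ceil() rounds an integer division
 * up, so ceil(10, 4) == (10 + 4 - 1) / 4 == 3.
 */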

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
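
/*
 * Worked example (illustrative only): with RDS_FRAG_SHIFT == 12 each
 * fragment is 4KB, so a maximal 1MB message spans
 * RDS_MAX_MSG_SIZE / RDS_FRAG_SIZE == 256 fragments.
 */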

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
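
/*
 * Illustrative sketch (hypothetical helper, not part of the RDS API):
 * the map carries one bit per port, split across RDS_CONG_MAP_PAGES
 * pages, so locating a port's bit is plain arithmetic on the
 * host-endian port number:
 *
 *	page = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	bit  = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	...then operate on map->m_page_addrs[page] at offset "bit"...
 */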


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
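
/*
 * Illustrative sketch (hypothetical call site): once a multipath
 * handshake has settled c_npaths, a sender picks its transmit path by
 * hashing the socket's bound port, roughly:
 *
 *	int i = conn->c_npaths > 1 ? RDS_MPATH_HASH(rs, conn->c_npaths) : 0;
 *	struct rds_conn_path *cp = &conn->c_path[i];
 */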

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;	/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_destroy_in_prog:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	struct net		*c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return conn->c_net;
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	conn->c_net = get_net(net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it will return a pong message without the additional control information,
 * which alerts the sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
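
/*
 * Worked example (illustrative only): a probe ping travels from sport
 * RDS_FLAG_PROBE_PORT to dport 0, and the answering pong from sport 0
 * to dport RDS_FLAG_PROBE_PORT, so RDS_HS_PROBE() matches both
 * directions while ordinary data traffic (nonzero data ports) never
 * does.
 */
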
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
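
/*
 * Worked example (illustrative only): the cookie packs r_key into the
 * low 32 bits and the byte offset into the high 32 bits, so the
 * helpers above invert one another:
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 4096);
 *	rds_rdma_cookie_key(c)    == 0x1234
 *	rds_rdma_cookie_offset(c) == 4096
 */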

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns, the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
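
/*
 * Illustrative sketch (hypothetical caller; rds_send_xmit() is the real
 * one): the .xmit return convention documented above maps to roughly
 * this control flow:
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret > 0)
 *		advance hdr_off/sg/off by ret bytes and keep sending
 *	else if (ret == 0)
 *		stop; the transport re-triggers the send thread itself
 *	else if (ret == -EAGAIN)
 *		retry the send immediately
 *	else if (ret == -ENOMEM)
 *		back off and retry later (delayed work)
 */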

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
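
/*
 * Worked example (illustrative only): setsockopt(SO_SNDBUF) with 64KB
 * makes the core networking code set sk_sndbuf to 128KB; halving it
 * here means RDS still charges exactly 64KB of payload bytes against
 * the socket.
 */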

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;

};
654/* af_rds.c */
655void rds_sock_addref(struct rds_sock *rs);
656void rds_sock_put(struct rds_sock *rs);
657void rds_wake_sk_sleep(struct rds_sock *rs);
658static inline void __rds_wake_sk_sleep(struct sock *sk)
659{
Eric Dumazetaa395142010-04-20 13:03:51 +0000660 wait_queue_head_t *waitq = sk_sleep(sk);
Andy Grover39de8282009-02-24 15:30:19 +0000661
662 if (!sock_flag(sk, SOCK_DEAD) && waitq)
663 wake_up(waitq);
664}
665extern wait_queue_head_t rds_poll_waitq;
666
667
668/* bind.c */
669int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
670void rds_remove_bound(struct rds_sock *rs);
671struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
santosh.shilimkar@oracle.com7b565432015-10-30 08:49:10 -0700672int rds_bind_lock_init(void);
673void rds_bind_lock_destroy(void);
Andy Grover39de8282009-02-24 15:30:19 +0000674
675/* cong.c */
676int rds_cong_get_maps(struct rds_connection *conn);
677void rds_cong_add_conn(struct rds_connection *conn);
678void rds_cong_remove_conn(struct rds_connection *conn);
679void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
680void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
681int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
682void rds_cong_queue_updates(struct rds_cong_map *map);
683void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
684int rds_cong_updated_since(unsigned long *recent);
685void rds_cong_add_socket(struct rds_sock *);
686void rds_cong_remove_socket(struct rds_sock *);
687void rds_cong_exit(void);
688struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
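
/*
 * Illustrative sketch (hypothetical call site): the cmpxchg above makes
 * state transitions race-free, so a connect attempt can claim the
 * connection atomically:
 *
 *	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		return;		// another path changed the state first
 */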

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
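
/*
 * Worked example (illustrative only): struct rds_header is 48 bytes, so
 * sizeof(*hdr) >> 2 == 12 32-bit words are summed.  A header stamped by
 * rds_message_make_checksum() re-sums to zero, which is why
 * rds_message_verify_checksum() compares against 0; an all-zero h_csum
 * is also accepted, meaning the sender didn't checksum at all.
 */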


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
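
/*
 * Illustrative sketch (hypothetical call site): bumping a counter pins
 * the current CPU so the per-CPU increment cannot race with migration:
 *
 *	rds_stats_inc(s_send_queued);
 *	// expands to: per_cpu(rds_stats, get_cpu()).s_send_queued++; put_cpu();
 */
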
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif