#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0 0x0300
#define RDS_PROTOCOL_3_1 0x0301
#define RDS_PROTOCOL_VERSION RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v) ((v) >> 8)
#define RDS_PROTOCOL_MINOR(v) ((v) & 255)
#define RDS_PROTOCOL(maj, min) (((maj) << 8) | (min))
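/*
 * Editor's worked example (not from the original source):
 * RDS_PROTOCOL(3, 1) == 0x0301, RDS_PROTOCOL_MAJOR(0x0301) == 3 and
 * RDS_PROTOCOL_MINOR(0x0301) == 1, so protocol versions compare
 * correctly as plain integers (3.0 < 3.1).
 */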

/*
 * XXX randomly chosen, but at least seems to be unused:
 * # 18464-18768 Unassigned
 * We should do better. We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT 18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
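/*
 * Editor's usage example: ceil(10, 4) == 3. The statement expression
 * evaluates each argument exactly once, so ceil(len++, 8) does not
 * double-increment len.
 */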

#define RDS_FRAG_SHIFT 12
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE ((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES (65536 / 8)
#define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
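/*
 * Editor's worked numbers, assuming 4KB pages: one bit per port for all
 * 65536 ports gives RDS_CONG_MAP_BYTES == 8192, RDS_CONG_MAP_PAGES == 2
 * and RDS_CONG_MAP_PAGE_BITS == 32768 bits per page.
 */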

struct rds_cong_map {
	struct rb_node m_rb_node;
	__be32 m_addr;
	wait_queue_head_t m_waitq;
	struct list_head m_conn_list;
	unsigned long m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL 0
#define RDS_RECONNECT_PENDING 1
#define RDS_IN_XMIT 2
#define RDS_RECV_REFILL 3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS 8
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
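/*
 * Editor's sketch of how the hash is meant to be used (illustrative,
 * not the send path verbatim): a socket's traffic stays on one path via
 *
 *	struct rds_conn_path *cp =
 *		&conn->c_path[RDS_MPATH_HASH(rs, conn->c_npaths)];
 *
 * The power-of-2 requirement is what lets "& ((n) - 1)" stand in for
 * "% n".
 */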

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection *cp_conn;
	struct rds_message *cp_xmit_rm;
	unsigned long cp_xmit_sg;
	unsigned int cp_xmit_hdr_off;
	unsigned int cp_xmit_data_off;
	unsigned int cp_xmit_atomic_sent;
	unsigned int cp_xmit_rdma_sent;
	unsigned int cp_xmit_data_sent;

	spinlock_t cp_lock; /* protect msg queues */
	u64 cp_next_tx_seq;
	struct list_head cp_send_queue;
	struct list_head cp_retrans;

	u64 cp_next_rx_seq;

	void *cp_transport_data;

	atomic_t cp_state;
	unsigned long cp_send_gen;
	unsigned long cp_flags;
	unsigned long cp_reconnect_jiffies;
	struct delayed_work cp_send_w;
	struct delayed_work cp_recv_w;
	struct delayed_work cp_conn_w;
	struct work_struct cp_down_w;
	struct mutex cp_cm_lock; /* protect cp_state & cm */
	wait_queue_head_t cp_waitq;

	unsigned int cp_unacked_packets;
	unsigned int cp_unacked_bytes;
	unsigned int cp_outgoing:1,
		     cp_pad_to_32:31;
	unsigned int cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node c_hash_node;
	__be32 c_laddr;
	__be32 c_faddr;
	unsigned int c_loopback:1,
		     c_ping_triggered:1,
		     c_pad_to_32:30;
	int c_npaths;
	struct rds_connection *c_passive;
	struct rds_transport *c_trans;

	struct rds_cong_map *c_lcong;
	struct rds_cong_map *c_fcong;

	/* Protocol version */
	unsigned int c_version;
	struct net *c_net;

	struct list_head c_map_item;
	unsigned long c_map_queued;

	struct rds_conn_path c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t c_hs_waitq; /* handshake waitq */

	u32 c_my_gen_num;
	u32 c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return conn->c_net;
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	conn->c_net = get_net(net);
}

#define RDS_FLAG_CONG_BITMAP 0x01
#define RDS_FLAG_ACK_REQUIRED 0x02
#define RDS_FLAG_RETRANSMITTED 0x04
#define RDS_MAX_ADV_CREDIT 255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it would return a pong message without additional control information
 * that would then alert the sender that the peer was an older rev.
 */
#define RDS_FLAG_PROBE_PORT 1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
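/*
 * Editor's note: per the macro above, a probe ping travels with
 * sport == RDS_FLAG_PROBE_PORT and dport == 0, and the answering pong
 * travels with sport == 0 and dport == RDS_FLAG_PROBE_PORT, so
 * RDS_HS_PROBE() matches both directions of the handshake.
 */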
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE 16

struct rds_header {
	__be64 h_sequence;
	__be64 h_ack;
	__be32 h_len;
	__be16 h_sport;
	__be16 h_dport;
	u8 h_flags;
	u8 h_credit;
	u8 h_padding[4];
	__sum16 h_csum;

	u8 h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE 0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION 1
struct rds_ext_header_version {
	__be32 h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA 2
struct rds_ext_header_rdma {
	__be32 h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST 3
struct rds_ext_header_rdma_dest {
	__be32 h_rdma_rkey;
	__be32 h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS 5
#define RDS_EXTHDR_GEN_NUM 6
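/*
 * Editor's sketch of how a handshake probe might advertise these
 * (illustrative; see rds_message_add_extension() below, and note that
 * "rm" is a hypothetical struct rds_message under construction):
 *
 *	__be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
 *
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_NPATHS,
 *				  &npaths, sizeof(npaths));
 */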

#define __RDS_EXTHDR_MAX 16 /* for now */
#define RDS_RX_MAX_TRACES (RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR 0
#define RDS_MSG_RX_START 1
#define RDS_MSG_RX_END 2
#define RDS_MSG_RX_CMSG 3

struct rds_incoming {
	atomic_t i_refcount;
	struct list_head i_item;
	struct rds_connection *i_conn;
	struct rds_conn_path *i_conn_path;
	struct rds_header i_hdr;
	unsigned long i_rx_jiffies;
	__be32 i_saddr;

	rds_rdma_cookie_t i_rdma_cookie;
	struct timeval i_rx_tstamp;
	u64 i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node r_rb_node;
	atomic_t r_refcount;
	u32 r_key;

	/* A copy of the creation flags */
	unsigned int r_use_once:1;
	unsigned int r_invalidate:1;
	unsigned int r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long r_state;
	struct rds_sock *r_sock; /* back pointer to the socket that owns us */
	struct rds_transport *r_trans;
	void *r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD 0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
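/*
 * Editor's worked example: rds_rdma_make_cookie(0x1234, 8) yields
 * 0x0000000800001234ULL; rds_rdma_cookie_key() recovers 0x1234 and
 * rds_rdma_cookie_offset() recovers 8.
 */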

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP 0
#define RDS_ATOMIC_TYPE_FADD 1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK 1
#define RDS_MSG_ON_CONN 2
#define RDS_MSG_HAS_ACK_SEQ 3
#define RDS_MSG_ACK_REQUIRED 4
#define RDS_MSG_RETRANSMITTED 5
#define RDS_MSG_MAPPED 6
#define RDS_MSG_PAGEVEC 7
#define RDS_MSG_FLUSH 8

struct rds_message {
	atomic_t m_refcount;
	struct list_head m_sock_item;
	struct list_head m_conn_item;
	struct rds_incoming m_inc;
	u64 m_ack_seq;
	__be32 m_daddr;
	unsigned long m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t m_rs_lock;
	wait_queue_head_t m_flush_wait;

	struct rds_sock *m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t m_rdma_cookie;

	unsigned int m_used_sgs;
	unsigned int m_total_sgs;

	void *m_final_op;

	struct {
		struct rm_atomic_op {
			int op_type;
			union {
				struct {
					uint64_t compare;
					uint64_t swap;
					uint64_t compare_mask;
					uint64_t swap_mask;
				} op_m_cswp;
				struct {
					uint64_t add;
					uint64_t nocarry_mask;
				} op_m_fadd;
			};

			u32 op_rkey;
			u64 op_remote_addr;
			unsigned int op_notify:1;
			unsigned int op_recverr:1;
			unsigned int op_mapped:1;
			unsigned int op_silent:1;
			unsigned int op_active:1;
			struct scatterlist *op_sg;
			struct rds_notifier *op_notifier;

			struct rds_mr *op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32 op_rkey;
			u64 op_remote_addr;
			unsigned int op_write:1;
			unsigned int op_fence:1;
			unsigned int op_notify:1;
			unsigned int op_recverr:1;
			unsigned int op_mapped:1;
			unsigned int op_silent:1;
			unsigned int op_active:1;
			unsigned int op_bytes;
			unsigned int op_nents;
			unsigned int op_count;
			struct scatterlist *op_sg;
			struct rds_notifier *op_notifier;

			struct rds_mr *op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int op_active:1;
			unsigned int op_notify:1;
			unsigned int op_nents;
			unsigned int op_count;
			unsigned int op_dmasg;
			unsigned int op_dmaoff;
			struct scatterlist *op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head n_list;
	uint64_t n_user_token;
	int n_status;
};

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message. The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn. The header must be
 *        sent before the data payload. .xmit must be prepared to send a
 *        message with no data payload. .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now. This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available. Returning -EAGAIN tells the caller to retry the send
 *        immediately. Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
 *        it returns, rds_recv_incoming() may no longer be called on the
 *        connection. This will only be called once after conn_connect
 *        returns non-zero success. The caller serializes this with the
 *        send and connecting paths (xmit_* and conn_*). The transport
 *        is responsible for other serialization, including
 *        rds_recv_incoming(). This is called in process context but
 *        should try hard not to block.
 */

struct rds_transport {
	char t_name[TRANSNAMSIZ];
	struct list_head t_item;
	struct module *t_owner;
	unsigned int t_prefer_loopback:1,
		     t_mp_capable:1;
	unsigned int t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *cp);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
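/*
 * Editor's sketch of how a transport module fills this in and hands it
 * to rds_trans_register() (declared below). The rds_foo_* callbacks are
 * hypothetical, named only for illustration:
 *
 *	static struct rds_transport rds_foo_transport = {
 *		.t_name			= "foo",
 *		.t_owner		= THIS_MODULE,
 *		.laddr_check		= rds_foo_laddr_check,
 *		.conn_alloc		= rds_foo_conn_alloc,
 *		.conn_free		= rds_foo_conn_free,
 *		.conn_path_connect	= rds_foo_conn_path_connect,
 *		.conn_path_shutdown	= rds_foo_conn_path_shutdown,
 *		.xmit			= rds_foo_xmit,
 *		.recv_path		= rds_foo_recv_path,
 *		.inc_copy_to_user	= rds_foo_inc_copy_to_user,
 *		.inc_free		= rds_foo_inc_free,
 *	};
 */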

struct rds_sock {
	struct sock rs_sk;

	u64 rs_user_addr;
	u64 rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head rs_bound_node;
	u64 rs_bound_key;
	__be32 rs_bound_addr;
	__be32 rs_conn_addr;
	__be16 rs_bound_port;
	__be16 rs_conn_port;
	struct rds_transport *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection *rs_conn;

	/* flag indicating we were congested or not */
	int rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t rs_lock;
	struct list_head rs_send_queue;
	u32 rs_snd_bytes;
	int rs_rcv_bytes;
	struct list_head rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t rs_cong_mask;
	uint64_t rs_cong_notify;
	struct list_head rs_cong_list;
	unsigned long rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t rs_recv_lock;
	struct list_head rs_recv_queue;

	/* just for stats reporting */
	struct list_head rs_item;

	/* these have their own lock */
	spinlock_t rs_rdma_lock;
	struct rb_root rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char rs_recverr,
		      rs_cong_monitor;
	u32 rs_hash_initval;

	/* Socket receive path trace points */
	u8 rs_rx_traces;
	u8 rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
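/*
 * Editor's example: if userspace sets SO_SNDBUF to 64KB, the core
 * stores sk_sndbuf == 128KB, and rds_sk_sndbuf() reports the original
 * 64KB as the payload budget.
 */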

struct rds_statistics {
	uint64_t s_conn_reset;
	uint64_t s_recv_drop_bad_checksum;
	uint64_t s_recv_drop_old_seq;
	uint64_t s_recv_drop_no_sock;
	uint64_t s_recv_drop_dead_sock;
	uint64_t s_recv_deliver_raced;
	uint64_t s_recv_delivered;
	uint64_t s_recv_queued;
	uint64_t s_recv_immediate_retry;
	uint64_t s_recv_delayed_retry;
	uint64_t s_recv_ack_required;
	uint64_t s_recv_rdma_bytes;
	uint64_t s_recv_ping;
	uint64_t s_send_queue_empty;
	uint64_t s_send_queue_full;
	uint64_t s_send_lock_contention;
	uint64_t s_send_lock_queue_raced;
	uint64_t s_send_immediate_retry;
	uint64_t s_send_delayed_retry;
	uint64_t s_send_drop_acked;
	uint64_t s_send_ack_required;
	uint64_t s_send_queued;
	uint64_t s_send_rdma;
	uint64_t s_send_rdma_bytes;
	uint64_t s_send_pong;
	uint64_t s_page_remainder_hit;
	uint64_t s_page_remainder_miss;
	uint64_t s_copy_to_user;
	uint64_t s_copy_from_user;
	uint64_t s_cong_update_queued;
	uint64_t s_cong_update_received;
	uint64_t s_cong_send_error;
	uint64_t s_cong_send_blocked;
	uint64_t s_recv_bytes_added_to_socket;
	uint64_t s_recv_bytes_removed_from_socket;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
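/*
 * Editor's example of the cmpxchg-based state machine: a connect worker
 * claims a path with
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		... we own the connect attempt ...
 *
 * Only one of several racing callers can win, because the transition
 * succeeds only while the old state still matches.
 */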

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
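/*
 * Editor's note on the pair above: the sender computes h_csum over the
 * header with the field zeroed; the receiver accepts either a valid
 * checksum or an all-zero h_csum (checksumming disabled), which is what
 * the "!hdr->h_csum ||" short-circuit implements.
 */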


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *cp);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
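/*
 * Editor's usage sketch: the counters are per-CPU, so updates are cheap
 * and lock-free ("copied" below is a hypothetical byte count):
 *
 *	rds_stats_inc(s_recv_ping);
 *	rds_stats_add(s_copy_to_user, copied);
 */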
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *cp, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif