/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

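/* Hash the (laddr, faddr) pair, mixed with a lazily initialized random
 * secret, into one of the RDS_CONNECTION_HASH_ENTRIES buckets.
 */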
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future. It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time. They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created. They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp,
						int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;
	struct rds_transport *otrans = trans;

	if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
		goto new_conn;
	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

new_conn:
	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;
	rds_conn_net_set(conn, net);

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback. If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_send_gen = 0;
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
			found = NULL;
		else
			found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
			    (otrans->t_type != RDS_TRANS_TCP)) {
				/* Only the active side should be added to
				 * reconnect list for TCP.
				 */
				hlist_add_head_rcu(&conn->c_hash_node, head);
			}
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

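/* Wrapper around __rds_conn_create() with is_outgoing == 0, i.e. the
 * lookup/creation is not being driven by a locally initiated connect.
 */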
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

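/* As above, but with is_outgoing == 1: the local side is initiating the
 * connection, which affects loopback preference and, for TCP, which side
 * gets hashed for reconnect.
 */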
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

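/* Transition a connection to DISCONNECTING, wait for in-flight send and
 * recv work to drain, let the transport tear down its state, reset partial
 * message state, and finally queue a reconnect if the conn is still hashed.
 */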
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
				       atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
		wait_event(conn->c_waitq,
			   !test_bit(RDS_RECV_REFILL, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				       "%s: failed to transition to state DOWN, "
				       "current state is %d\n",
				       __func__,
				       atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances. It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here. They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

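/* Dump the messages sitting on either the send queue (want_send) or the
 * retransmit queue of every connection, for the RDS_INFO_SEND_MESSAGES and
 * RDS_INFO_RETRANS_MESSAGES info requests.
 */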
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

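/* Walk every hashed connection and hand it to @visitor. Entries for which
 * the visitor returns non-zero are copied out while they still fit in @len;
 * lens->nr counts them all so the caller can resize its buffer and retry.
 */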
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

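/* Fill one struct rds_info_connection from a conn for the
 * RDS_INFO_CONNECTIONS listing.
 */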
static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

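/* Handler for RDS_INFO_CONNECTIONS: emits one rds_info_connection per conn. */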
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
			       rds_conn_info_visitor,
			       sizeof(struct rds_info_connection));
}

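/* Create the connection slab and register the socket info handlers. */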
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

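/* Tear down loopback connections, free the slab and deregister the info
 * handlers. All connections must already be gone by the time this runs.
 */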
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}