1/* -*- mode: c; c-basic-offset: 8; -*-
2 *
3 * vim: noexpandtab sw=8 ts=8 sts=0:
4 *
5 * Copyright (C) 2004 Oracle. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public
18 * License along with this program; if not, write to the
19 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 * Boston, MA 021110-1307, USA.
21 *
22 * ----
23 *
24 * Callers for this were originally written against a very simple synchronous
25 * API. This implementation reflects those simple callers. Some day I'm sure
26 * we'll need to move to a more robust posting/callback mechanism.
27 *
28 * Transmit calls pass in kernel virtual addresses and block while copying them into
29 * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
30 * for a failed socket to time out. TX callers can also pass in a pointer to an
31 * 'int' which gets filled with an errno off the wire in response to the
32 * message they send.
33 *
34 * Handlers for unsolicited messages are registered. Each socket has a page
35 * that incoming data is copied into. First the header, then the data.
36 * Handlers are called from only one thread with a reference to this per-socket
37 * page. This page is destroyed after the handler call, so it can't be
38 * referenced beyond the call. Handlers may block but are discouraged from
39 * doing so.
40 *
41 * Any framing errors (bad magic, large payload lengths) close a connection.
42 *
43 * Our sock_container holds the state we associate with a socket. Its current
44 * framing state is held there as well as the refcounting we do around when it
45 * is safe to tear down the socket. The socket is only finally torn down from
46 * the container when the container loses all of its references -- so as long
47 * as you hold a ref on the container you can trust that the socket is valid
48 * for use with kernel socket APIs.
49 *
50 * Connections are initiated between a pair of nodes when the node with the
51 * higher node number gets a heartbeat callback which indicates that the lower
52 * numbered node has started heartbeating. The lower numbered node is passive
53 * and only accepts the connection if the higher numbered node is heartbeating.
54 */
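
/*
 * Illustrative sketch only, not part of the build: how a caller might
 * register a handler for unsolicited messages with the API below.  The
 * handler prototype is inferred from how nh_func is invoked in
 * o2net_process_message(): it gets the header, the total length (header
 * plus payload) and the registration's data pointer, and its return
 * value is echoed back to the sender as that message's status.
 * MY_MSG_TYPE, MY_KEY and MY_MAX_LEN are hypothetical values a caller
 * would define for itself.
 *
 *	static int my_handler(struct o2net_msg *msg, u32 len, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	static LIST_HEAD(my_unreg_list);
 *
 *	ret = o2net_register_handler(MY_MSG_TYPE, MY_KEY, MY_MAX_LEN,
 *				     my_handler, NULL, &my_unreg_list);
 *	...
 *	o2net_unregister_handler_list(&my_unreg_list);
 */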
55
56#include <linux/kernel.h>
57#include <linux/jiffies.h>
58#include <linux/slab.h>
59#include <linux/idr.h>
60#include <linux/kref.h>
61#include <net/tcp.h>
62
63#include <asm/uaccess.h>
64
65#include "heartbeat.h"
66#include "tcp.h"
67#include "nodemanager.h"
68#define MLOG_MASK_PREFIX ML_TCP
69#include "masklog.h"
70#include "quorum.h"
71
72#include "tcp_internal.h"
73
74/*
75 * The linux network stack isn't sparse endian clean.. It has macros like
76 * ntohs() which perform the endian checks and structs like sockaddr_in
77 * which aren't annotated. So __force is found here to get the build
78 * clean. When they emerge from the dark ages and annotate the code
79 * we can remove these.
80 */
81
82#define SC_NODEF_FMT "node %s (num %u) at %u.%u.%u.%u:%u"
83#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
84 NIPQUAD(sc->sc_node->nd_ipv4_address), \
85 ntohs(sc->sc_node->nd_ipv4_port)
86
87/*
88 * In the following two log macros, the whitespace after the ',' just
89 * before ##args is intentional. Otherwise, gcc 2.95 will eat the
90 * previous token if args expands to nothing.
91 */
92#define msglog(hdr, fmt, args...) do { \
93 typeof(hdr) __hdr = (hdr); \
94 mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
95 "key %08x num %u] " fmt, \
96 be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
97 be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
98 be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
99 be32_to_cpu(__hdr->msg_num) , ##args); \
100} while (0)
101
102#define sclog(sc, fmt, args...) do { \
103 typeof(sc) __sc = (sc); \
104 mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
105 "pg_off %zu] " fmt, __sc, \
106 atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
107 __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
108 ##args); \
109} while (0)
110
111static DEFINE_RWLOCK(o2net_handler_lock);
112static struct rb_root o2net_handler_tree = RB_ROOT;
113
114static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
115
116/* XXX someday we'll need better accounting */
117static struct socket *o2net_listen_sock = NULL;
118
119/*
120 * listen work is only queued by the listening socket callbacks on the
121 * o2net_wq. teardown detaches the callbacks before destroying the workqueue.
122 * quorum work is queued as sock containers are shutdown.. stop_listening
123 * tears down all the node's sock containers, preventing future shutdowns
124 * and queued quorum work, before canceling delayed quorum work and
125 * destroying the work queue.
126 */
127static struct workqueue_struct *o2net_wq;
128static struct work_struct o2net_listen_work;
129
130static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
131#define O2NET_HB_PRI 0x1
132
133static struct o2net_handshake *o2net_hand;
134static struct o2net_msg *o2net_keep_req, *o2net_keep_resp;
135
136static int o2net_sys_err_translations[O2NET_ERR_MAX] =
137 {[O2NET_ERR_NONE] = 0,
138 [O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
139 [O2NET_ERR_OVERFLOW] = -EOVERFLOW,
140 [O2NET_ERR_DIED] = -EHOSTDOWN,};
141
142/* can't quite avoid *all* internal declarations :/ */
143static void o2net_sc_connect_completed(struct work_struct *work);
144static void o2net_rx_until_empty(struct work_struct *work);
145static void o2net_shutdown_sc(struct work_struct *work);
146static void o2net_listen_data_ready(struct sock *sk, int bytes);
147static void o2net_sc_send_keep_req(struct work_struct *work);
148static void o2net_idle_timer(unsigned long data);
149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
150static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
151
152/*
153 * FIXME: These should use to_o2nm_cluster_from_node(), but we end up
154 * losing our parent link to the cluster during shutdown. This can be
155 * solved by adding a pre-removal callback to configfs, or passing
156 * around the cluster with the node. -jeffm
157 */
158static inline int o2net_reconnect_delay(struct o2nm_node *node)
159{
160 return o2nm_single_cluster->cl_reconnect_delay_ms;
161}
162
163static inline int o2net_keepalive_delay(struct o2nm_node *node)
164{
165 return o2nm_single_cluster->cl_keepalive_delay_ms;
166}
167
168static inline int o2net_idle_timeout(struct o2nm_node *node)
169{
170 return o2nm_single_cluster->cl_idle_timeout_ms;
171}
172
173static inline int o2net_sys_err_to_errno(enum o2net_system_error err)
174{
175 int trans;
176 BUG_ON(err >= O2NET_ERR_MAX);
177 trans = o2net_sys_err_translations[err];
178
179 /* Just in case we mess up the translation table above */
180 BUG_ON(err != O2NET_ERR_NONE && trans == 0);
181 return trans;
182}
183
184static struct o2net_node * o2net_nn_from_num(u8 node_num)
185{
186 BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes));
187 return &o2net_nodes[node_num];
188}
189
190static u8 o2net_num_from_nn(struct o2net_node *nn)
191{
192 BUG_ON(nn == NULL);
193 return nn - o2net_nodes;
194}
195
196/* ------------------------------------------------------------ */
197
198static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
199{
200 int ret = 0;
201
202 do {
203 if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
204 ret = -EAGAIN;
205 break;
206 }
207 spin_lock(&nn->nn_lock);
208 ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
209 if (ret == 0)
210 list_add_tail(&nsw->ns_node_item,
211 &nn->nn_status_list);
212 spin_unlock(&nn->nn_lock);
213 } while (ret == -EAGAIN);
214
215 if (ret == 0) {
216 init_waitqueue_head(&nsw->ns_wq);
217 nsw->ns_sys_status = O2NET_ERR_NONE;
218 nsw->ns_status = 0;
219 }
220
221 return ret;
222}
223
224static void o2net_complete_nsw_locked(struct o2net_node *nn,
225 struct o2net_status_wait *nsw,
226 enum o2net_system_error sys_status,
227 s32 status)
228{
229 assert_spin_locked(&nn->nn_lock);
230
231 if (!list_empty(&nsw->ns_node_item)) {
232 list_del_init(&nsw->ns_node_item);
233 nsw->ns_sys_status = sys_status;
234 nsw->ns_status = status;
235 idr_remove(&nn->nn_status_idr, nsw->ns_id);
236 wake_up(&nsw->ns_wq);
237 }
238}
239
240static void o2net_complete_nsw(struct o2net_node *nn,
241 struct o2net_status_wait *nsw,
242 u64 id, enum o2net_system_error sys_status,
243 s32 status)
244{
245 spin_lock(&nn->nn_lock);
246 if (nsw == NULL) {
247 if (id > INT_MAX)
248 goto out;
249
250 nsw = idr_find(&nn->nn_status_idr, id);
251 if (nsw == NULL)
252 goto out;
253 }
254
255 o2net_complete_nsw_locked(nn, nsw, sys_status, status);
256
257out:
258 spin_unlock(&nn->nn_lock);
259 return;
260}
261
262static void o2net_complete_nodes_nsw(struct o2net_node *nn)
263{
264 struct list_head *iter, *tmp;
265 unsigned int num_kills = 0;
266 struct o2net_status_wait *nsw;
267
268 assert_spin_locked(&nn->nn_lock);
269
270 list_for_each_safe(iter, tmp, &nn->nn_status_list) {
271 nsw = list_entry(iter, struct o2net_status_wait, ns_node_item);
272 o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
273 num_kills++;
274 }
275
276 mlog(0, "completed %d messages for node %u\n", num_kills,
277 o2net_num_from_nn(nn));
278}
279
280static int o2net_nsw_completed(struct o2net_node *nn,
281 struct o2net_status_wait *nsw)
282{
283 int completed;
284 spin_lock(&nn->nn_lock);
285 completed = list_empty(&nsw->ns_node_item);
286 spin_unlock(&nn->nn_lock);
287 return completed;
288}
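
/*
 * Summary of the status-wait round trip built from the helpers above:
 * o2net_prep_nsw() hands a pending send an idr id which the sender puts
 * in the header's msg_num.  The remote node answers with a status-only
 * message (O2NET_MSG_STATUS_MAGIC) carrying that same msg_num, and
 * o2net_complete_nsw() looks the id back up and wakes the waiter.  If
 * the socket dies first, o2net_complete_nodes_nsw() fails every
 * outstanding waiter with O2NET_ERR_DIED instead.
 */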
289
290/* ------------------------------------------------------------ */
291
292static void sc_kref_release(struct kref *kref)
293{
294 struct o2net_sock_container *sc = container_of(kref,
295 struct o2net_sock_container, sc_kref);
296 BUG_ON(timer_pending(&sc->sc_idle_timeout));
297
298 sclog(sc, "releasing\n");
299
300 if (sc->sc_sock) {
301 sock_release(sc->sc_sock);
302 sc->sc_sock = NULL;
303 }
304
305 o2nm_node_put(sc->sc_node);
306 sc->sc_node = NULL;
307
308 kfree(sc);
309}
310
311static void sc_put(struct o2net_sock_container *sc)
312{
313 sclog(sc, "put\n");
314 kref_put(&sc->sc_kref, sc_kref_release);
315}
316static void sc_get(struct o2net_sock_container *sc)
317{
318 sclog(sc, "get\n");
319 kref_get(&sc->sc_kref);
320}
321static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
322{
323 struct o2net_sock_container *sc, *ret = NULL;
324 struct page *page = NULL;
325
326 page = alloc_page(GFP_NOFS);
327 sc = kcalloc(1, sizeof(*sc), GFP_NOFS);
328 if (sc == NULL || page == NULL)
329 goto out;
330
331 kref_init(&sc->sc_kref);
332 o2nm_node_get(node);
333 sc->sc_node = node;
334
335 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
336 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
337 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
338 INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
339
340 init_timer(&sc->sc_idle_timeout);
341 sc->sc_idle_timeout.function = o2net_idle_timer;
342 sc->sc_idle_timeout.data = (unsigned long)sc;
343
344 sclog(sc, "alloced\n");
345
346 ret = sc;
347 sc->sc_page = page;
348 sc = NULL;
349 page = NULL;
350
351out:
352 if (page)
353 __free_page(page);
354 kfree(sc);
355
356 return ret;
357}
358
359/* ------------------------------------------------------------ */
360
361static void o2net_sc_queue_work(struct o2net_sock_container *sc,
362 struct work_struct *work)
363{
364 sc_get(sc);
365 if (!queue_work(o2net_wq, work))
366 sc_put(sc);
367}
368static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
369 struct delayed_work *work,
370 int delay)
371{
372 sc_get(sc);
373 if (!queue_delayed_work(o2net_wq, work, delay))
374 sc_put(sc);
375}
376static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
377 struct delayed_work *work)
378{
379 if (cancel_delayed_work(work))
380 sc_put(sc);
381}
382
383static void o2net_set_nn_state(struct o2net_node *nn,
384 struct o2net_sock_container *sc,
385 unsigned valid, int err)
386{
387 int was_valid = nn->nn_sc_valid;
388 int was_err = nn->nn_persistent_error;
389 struct o2net_sock_container *old_sc = nn->nn_sc;
390
391 assert_spin_locked(&nn->nn_lock);
392
393 /* the node num comparison and single connect/accept path should stop
394 * a non-null sc from being overwritten with another */
395 BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
396 mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
397 mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
398
399 /* we won't reconnect after our valid conn goes away for
400 * this hb iteration.. here so it shows up in the logs */
401 if (was_valid && !valid && err == 0)
402 err = -ENOTCONN;
403
404 mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
405 o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
406 nn->nn_persistent_error, err);
407
408 nn->nn_sc = sc;
409 nn->nn_sc_valid = valid ? 1 : 0;
410 nn->nn_persistent_error = err;
411
412 /* mirrors o2net_tx_can_proceed() */
413 if (nn->nn_persistent_error || nn->nn_sc_valid)
414 wake_up(&nn->nn_sc_wq);
415
416 if (!was_err && nn->nn_persistent_error) {
417 o2quo_conn_err(o2net_num_from_nn(nn));
418 queue_delayed_work(o2net_wq, &nn->nn_still_up,
419 msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
420 }
421
422 if (was_valid && !valid) {
423 printk(KERN_INFO "o2net: no longer connected to "
424 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
425 o2net_complete_nodes_nsw(nn);
426 }
427
428 if (!was_valid && valid) {
429 o2quo_conn_up(o2net_num_from_nn(nn));
430 /* this is a bit of a hack. we only try reconnecting
431 * when heartbeating starts until we get a connection.
432 * if that connection then dies we don't try reconnecting.
433 * the only way to start connecting again is to down
434 * heartbeat and bring it back up. */
435 cancel_delayed_work(&nn->nn_connect_expired);
436 printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
437 o2nm_this_node() > sc->sc_node->nd_num ?
438 "connected to" : "accepted connection from",
439 SC_NODEF_ARGS(sc));
440 }
441
442 /* trigger the connecting worker func as long as we're not valid,
443 * it will back off if it shouldn't connect. This can be called
444 * from node config teardown and so needs to be careful about
445 * the work queue actually being up. */
446 if (!valid && o2net_wq) {
447 unsigned long delay;
448 /* delay if we're within a RECONNECT_DELAY of the
449 * last attempt */
450 delay = (nn->nn_last_connect_attempt +
451 msecs_to_jiffies(o2net_reconnect_delay(sc->sc_node)))
452 - jiffies;
453 if (delay > msecs_to_jiffies(o2net_reconnect_delay(sc->sc_node)))
454 delay = 0;
455 mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
456 queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
457 }
458
459 /* keep track of the nn's sc ref for the caller */
460 if ((old_sc == NULL) && sc)
461 sc_get(sc);
462 if (old_sc && (old_sc != sc)) {
463 o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
464 sc_put(old_sc);
465 }
466}
467
468/* see o2net_register_callbacks() */
469static void o2net_data_ready(struct sock *sk, int bytes)
470{
471 void (*ready)(struct sock *sk, int bytes);
472
473 read_lock(&sk->sk_callback_lock);
474 if (sk->sk_user_data) {
475 struct o2net_sock_container *sc = sk->sk_user_data;
476 sclog(sc, "data_ready hit\n");
477 do_gettimeofday(&sc->sc_tv_data_ready);
478 o2net_sc_queue_work(sc, &sc->sc_rx_work);
479 ready = sc->sc_data_ready;
480 } else {
481 ready = sk->sk_data_ready;
482 }
483 read_unlock(&sk->sk_callback_lock);
484
485 ready(sk, bytes);
486}
487
488/* see o2net_register_callbacks() */
489static void o2net_state_change(struct sock *sk)
490{
491 void (*state_change)(struct sock *sk);
492 struct o2net_sock_container *sc;
493
494 read_lock(&sk->sk_callback_lock);
495 sc = sk->sk_user_data;
496 if (sc == NULL) {
497 state_change = sk->sk_state_change;
498 goto out;
499 }
500
501 sclog(sc, "state_change to %d\n", sk->sk_state);
502
503 state_change = sc->sc_state_change;
504
505 switch(sk->sk_state) {
506 /* ignore connecting sockets as they make progress */
507 case TCP_SYN_SENT:
508 case TCP_SYN_RECV:
509 break;
510 case TCP_ESTABLISHED:
511 o2net_sc_queue_work(sc, &sc->sc_connect_work);
512 break;
513 default:
514 o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
515 break;
516 }
517out:
518 read_unlock(&sk->sk_callback_lock);
519 state_change(sk);
520}
521
522/*
523 * we register callbacks so we can queue work on events before calling
524 * the original callbacks. our callbacks are careful to test user_data
525 * to discover when they've raced with o2net_unregister_callbacks().
526 */
527static void o2net_register_callbacks(struct sock *sk,
528 struct o2net_sock_container *sc)
529{
530 write_lock_bh(&sk->sk_callback_lock);
531
532 /* accepted sockets inherit the old listen socket data ready */
533 if (sk->sk_data_ready == o2net_listen_data_ready) {
534 sk->sk_data_ready = sk->sk_user_data;
535 sk->sk_user_data = NULL;
536 }
537
538 BUG_ON(sk->sk_user_data != NULL);
539 sk->sk_user_data = sc;
540 sc_get(sc);
541
542 sc->sc_data_ready = sk->sk_data_ready;
543 sc->sc_state_change = sk->sk_state_change;
544 sk->sk_data_ready = o2net_data_ready;
545 sk->sk_state_change = o2net_state_change;
546
547 write_unlock_bh(&sk->sk_callback_lock);
548}
549
550static int o2net_unregister_callbacks(struct sock *sk,
551 struct o2net_sock_container *sc)
552{
553 int ret = 0;
554
555 write_lock_bh(&sk->sk_callback_lock);
556 if (sk->sk_user_data == sc) {
557 ret = 1;
558 sk->sk_user_data = NULL;
559 sk->sk_data_ready = sc->sc_data_ready;
560 sk->sk_state_change = sc->sc_state_change;
561 }
562 write_unlock_bh(&sk->sk_callback_lock);
563
564 return ret;
565}
566
567/*
568 * this is a little helper that is called by callers who have seen a problem
569 * with an sc and want to detach it from the nn if someone already hasn't beat
570 * them to it. if an error is given then the shutdown will be persistent
571 * and pending transmits will be canceled.
572 */
573static void o2net_ensure_shutdown(struct o2net_node *nn,
574 struct o2net_sock_container *sc,
575 int err)
576{
577 spin_lock(&nn->nn_lock);
578 if (nn->nn_sc == sc)
579 o2net_set_nn_state(nn, NULL, 0, err);
580 spin_unlock(&nn->nn_lock);
581}
582
583/*
584 * This work queue function performs the blocking parts of socket shutdown. A
585 * few paths lead here. set_nn_state will trigger this callback if it sees an
586 * sc detached from the nn. state_change will also trigger this callback
587 * directly when it sees errors. In that case we need to call set_nn_state
588 * ourselves as state_change couldn't get the nn_lock and call set_nn_state
589 * itself.
590 */
591static void o2net_shutdown_sc(struct work_struct *work)
592{
593 struct o2net_sock_container *sc =
594 container_of(work, struct o2net_sock_container,
595 sc_shutdown_work);
596 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
597
598 sclog(sc, "shutting down\n");
599
600 /* drop the callbacks ref and call shutdown only once */
601 if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
602 /* we shouldn't flush as we're in the thread, the
603 * races with pending sc work structs are harmless */
604 del_timer_sync(&sc->sc_idle_timeout);
605 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
606 sc_put(sc);
607 sc->sc_sock->ops->shutdown(sc->sc_sock,
608 RCV_SHUTDOWN|SEND_SHUTDOWN);
609 }
610
611 /* not fatal so failed connects before the other guy has our
612 * heartbeat can be retried */
613 o2net_ensure_shutdown(nn, sc, 0);
614 sc_put(sc);
615}
616
617/* ------------------------------------------------------------ */
618
619static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type,
620 u32 key)
621{
622 int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
623
624 if (ret == 0)
625 ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
626
627 return ret;
628}
629
630static struct o2net_msg_handler *
631o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
632 struct rb_node **ret_parent)
633{
634 struct rb_node **p = &o2net_handler_tree.rb_node;
635 struct rb_node *parent = NULL;
636 struct o2net_msg_handler *nmh, *ret = NULL;
637 int cmp;
638
639 while (*p) {
640 parent = *p;
641 nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
642 cmp = o2net_handler_cmp(nmh, msg_type, key);
643
644 if (cmp < 0)
645 p = &(*p)->rb_left;
646 else if (cmp > 0)
647 p = &(*p)->rb_right;
648 else {
649 ret = nmh;
650 break;
651 }
652 }
653
654 if (ret_p != NULL)
655 *ret_p = p;
656 if (ret_parent != NULL)
657 *ret_parent = parent;
658
659 return ret;
660}
661
662static void o2net_handler_kref_release(struct kref *kref)
663{
664 struct o2net_msg_handler *nmh;
665 nmh = container_of(kref, struct o2net_msg_handler, nh_kref);
666
667 kfree(nmh);
668}
669
670static void o2net_handler_put(struct o2net_msg_handler *nmh)
671{
672 kref_put(&nmh->nh_kref, o2net_handler_kref_release);
673}
674
675/* max_len is protection for the handler func. incoming messages won't
676 * be given to the handler if their payload is longer than the max. */
677int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
678 o2net_msg_handler_func *func, void *data,
679 struct list_head *unreg_list)
680{
681 struct o2net_msg_handler *nmh = NULL;
682 struct rb_node **p, *parent;
683 int ret = 0;
684
685 if (max_len > O2NET_MAX_PAYLOAD_BYTES) {
686 mlog(0, "max_len for message handler out of range: %u\n",
687 max_len);
688 ret = -EINVAL;
689 goto out;
690 }
691
692 if (!msg_type) {
693 mlog(0, "no message type provided: %u, %p\n", msg_type, func);
694 ret = -EINVAL;
695 goto out;
696
697 }
698 if (!func) {
699 mlog(0, "no message handler provided: %u, %p\n",
700 msg_type, func);
701 ret = -EINVAL;
702 goto out;
703 }
704
705 nmh = kcalloc(1, sizeof(struct o2net_msg_handler), GFP_NOFS);
706 if (nmh == NULL) {
707 ret = -ENOMEM;
708 goto out;
709 }
710
711 nmh->nh_func = func;
712 nmh->nh_func_data = data;
713 nmh->nh_msg_type = msg_type;
714 nmh->nh_max_len = max_len;
715 nmh->nh_key = key;
716 /* the tree and list get this ref.. they're both removed in
717 * unregister when this ref is dropped */
718 kref_init(&nmh->nh_kref);
719 INIT_LIST_HEAD(&nmh->nh_unregister_item);
720
721 write_lock(&o2net_handler_lock);
722 if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
723 ret = -EEXIST;
724 else {
725 rb_link_node(&nmh->nh_node, parent, p);
726 rb_insert_color(&nmh->nh_node, &o2net_handler_tree);
727 list_add_tail(&nmh->nh_unregister_item, unreg_list);
728
729 mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
730 func, msg_type, key);
731 /* we've had some trouble with handlers seemingly vanishing. */
732 mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
733 &parent) == NULL,
734 "couldn't find handler we *just* registerd "
735 "for type %u key %08x\n", msg_type, key);
736 }
737 write_unlock(&o2net_handler_lock);
738 if (ret)
739 goto out;
740
741out:
742 if (ret)
743 kfree(nmh);
744
745 return ret;
746}
747EXPORT_SYMBOL_GPL(o2net_register_handler);
748
749void o2net_unregister_handler_list(struct list_head *list)
750{
751 struct list_head *pos, *n;
752 struct o2net_msg_handler *nmh;
753
754 write_lock(&o2net_handler_lock);
755 list_for_each_safe(pos, n, list) {
756 nmh = list_entry(pos, struct o2net_msg_handler,
757 nh_unregister_item);
758 mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
759 nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
760 rb_erase(&nmh->nh_node, &o2net_handler_tree);
761 list_del_init(&nmh->nh_unregister_item);
762 kref_put(&nmh->nh_kref, o2net_handler_kref_release);
763 }
764 write_unlock(&o2net_handler_lock);
765}
766EXPORT_SYMBOL_GPL(o2net_unregister_handler_list);
767
768static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
769{
770 struct o2net_msg_handler *nmh;
771
772 read_lock(&o2net_handler_lock);
773 nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL);
774 if (nmh)
775 kref_get(&nmh->nh_kref);
776 read_unlock(&o2net_handler_lock);
777
778 return nmh;
779}
780
781/* ------------------------------------------------------------ */
782
783static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
784{
785 int ret;
786 mm_segment_t oldfs;
787 struct kvec vec = {
788 .iov_len = len,
789 .iov_base = data,
790 };
791 struct msghdr msg = {
792 .msg_iovlen = 1,
793 .msg_iov = (struct iovec *)&vec,
794 .msg_flags = MSG_DONTWAIT,
795 };
796
797 oldfs = get_fs();
798 set_fs(get_ds());
799 ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
800 set_fs(oldfs);
801
802 return ret;
803}
804
805static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
806 size_t veclen, size_t total)
807{
808 int ret;
809 mm_segment_t oldfs;
810 struct msghdr msg = {
811 .msg_iov = (struct iovec *)vec,
812 .msg_iovlen = veclen,
813 };
814
815 if (sock == NULL) {
816 ret = -EINVAL;
817 goto out;
818 }
819
820 oldfs = get_fs();
821 set_fs(get_ds());
822 ret = sock_sendmsg(sock, &msg, total);
823 set_fs(oldfs);
824 if (ret != total) {
825 mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
826 total);
827 if (ret >= 0)
828 ret = -EPIPE; /* should be smarter, I bet */
829 goto out;
830 }
831
832 ret = 0;
833out:
834 if (ret < 0)
835 mlog(0, "returning error: %d\n", ret);
836 return ret;
837}
838
839static void o2net_sendpage(struct o2net_sock_container *sc,
840 void *kmalloced_virt,
841 size_t size)
842{
843 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
844 ssize_t ret;
845
846
847 ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
848 virt_to_page(kmalloced_virt),
849 (long)kmalloced_virt & ~PAGE_MASK,
850 size, MSG_DONTWAIT);
851 if (ret != size) {
852 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
853 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
854 o2net_ensure_shutdown(nn, sc, 0);
855 }
856}
857
858static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key)
859{
860 memset(msg, 0, sizeof(struct o2net_msg));
861 msg->magic = cpu_to_be16(O2NET_MSG_MAGIC);
862 msg->data_len = cpu_to_be16(data_len);
863 msg->msg_type = cpu_to_be16(msg_type);
864 msg->sys_status = cpu_to_be32(O2NET_ERR_NONE);
865 msg->status = 0;
866 msg->key = cpu_to_be32(key);
867}
868
869static int o2net_tx_can_proceed(struct o2net_node *nn,
870 struct o2net_sock_container **sc_ret,
871 int *error)
872{
873 int ret = 0;
874
875 spin_lock(&nn->nn_lock);
876 if (nn->nn_persistent_error) {
877 ret = 1;
878 *sc_ret = NULL;
879 *error = nn->nn_persistent_error;
880 } else if (nn->nn_sc_valid) {
881 kref_get(&nn->nn_sc->sc_kref);
882
883 ret = 1;
884 *sc_ret = nn->nn_sc;
885 *error = 0;
886 }
887 spin_unlock(&nn->nn_lock);
888
889 return ret;
890}
891
892int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
893 size_t caller_veclen, u8 target_node, int *status)
894{
895 int ret, error = 0;
896 struct o2net_msg *msg = NULL;
897 size_t veclen, caller_bytes = 0;
898 struct kvec *vec = NULL;
899 struct o2net_sock_container *sc = NULL;
900 struct o2net_node *nn = o2net_nn_from_num(target_node);
901 struct o2net_status_wait nsw = {
902 .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
903 };
904
905 if (o2net_wq == NULL) {
906 mlog(0, "attempt to tx without o2netd running\n");
907 ret = -ESRCH;
908 goto out;
909 }
910
911 if (caller_veclen == 0) {
912 mlog(0, "bad kvec array length\n");
913 ret = -EINVAL;
914 goto out;
915 }
916
917 caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
918 if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) {
919 mlog(0, "total payload len %zu too large\n", caller_bytes);
920 ret = -EINVAL;
921 goto out;
922 }
923
924 if (target_node == o2nm_this_node()) {
925 ret = -ELOOP;
926 goto out;
927 }
928
929 ret = wait_event_interruptible(nn->nn_sc_wq,
930 o2net_tx_can_proceed(nn, &sc, &error));
931 if (!ret && error)
932 ret = error;
933 if (ret)
934 goto out;
935
936 veclen = caller_veclen + 1;
937 vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
938 if (vec == NULL) {
939 mlog(0, "failed to %zu element kvec!\n", veclen);
940 ret = -ENOMEM;
941 goto out;
942 }
943
944 msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
945 if (!msg) {
946 mlog(0, "failed to allocate a o2net_msg!\n");
947 ret = -ENOMEM;
948 goto out;
949 }
950
951 o2net_init_msg(msg, caller_bytes, msg_type, key);
952
953 vec[0].iov_len = sizeof(struct o2net_msg);
954 vec[0].iov_base = msg;
955 memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
956
957 ret = o2net_prep_nsw(nn, &nsw);
958 if (ret)
959 goto out;
960
961 msg->msg_num = cpu_to_be32(nsw.ns_id);
962
963 /* finally, convert the message header to network byte-order
964 * and send */
965 ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
966 sizeof(struct o2net_msg) + caller_bytes);
967 msglog(msg, "sending returned %d\n", ret);
968 if (ret < 0) {
969 mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
970 goto out;
971 }
972
973 /* wait on other node's handler */
974 wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
975
976 /* Note that we avoid overwriting the callers status return
977 * variable if a system error was reported on the other
978 * side. Callers beware. */
979 ret = o2net_sys_err_to_errno(nsw.ns_sys_status);
980 if (status && !ret)
981 *status = nsw.ns_status;
982
983 mlog(0, "woken, returning system status %d, user status %d\n",
984 ret, nsw.ns_status);
985out:
986 if (sc)
987 sc_put(sc);
988 if (vec)
989 kfree(vec);
990 if (msg)
991 kfree(msg);
992 o2net_complete_nsw(nn, &nsw, 0, 0, 0);
993 return ret;
994}
995EXPORT_SYMBOL_GPL(o2net_send_message_vec);
996
997int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
998 u8 target_node, int *status)
999{
1000 struct kvec vec = {
1001 .iov_base = data,
1002 .iov_len = len,
1003 };
1004 return o2net_send_message_vec(msg_type, key, &vec, 1,
1005 target_node, status);
1006}
1007EXPORT_SYMBOL_GPL(o2net_send_message);
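
/*
 * Illustrative sketch only, not part of the build: a blocking send as a
 * caller might issue it.  MY_MSG_TYPE, MY_KEY, target and struct
 * my_payload are hypothetical names.
 *
 *	struct my_payload req = { ... };
 *	int status = 0, ret;
 *
 *	ret = o2net_send_message(MY_MSG_TYPE, MY_KEY, &req, sizeof(req),
 *				 target, &status);
 *
 * A zero return with 'status' filled in means the remote handler ran and
 * returned that value; errors reported by the other side come back
 * through the translation table above (-ENOPROTOOPT, -EOVERFLOW,
 * -EHOSTDOWN).  See o2net_send_message_vec() for the details.
 */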
1008
1009static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr,
1010 enum o2net_system_error syserr, int err)
1011{
1012 struct kvec vec = {
1013 .iov_base = hdr,
1014 .iov_len = sizeof(struct o2net_msg),
1015 };
1016
1017 BUG_ON(syserr >= O2NET_ERR_MAX);
1018
1019 /* leave other fields intact from the incoming message, msg_num
1020 * in particular */
1021 hdr->sys_status = cpu_to_be32(syserr);
1022 hdr->status = cpu_to_be32(err);
1023 hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); // twiddle the magic
1024 hdr->data_len = 0;
1025
1026 msglog(hdr, "about to send status magic %d\n", err);
1027 /* hdr has been in host byteorder this whole time */
1028 return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg));
1029}
1030
1031/* this returns -errno if the header was unknown or too large, etc.
1032 * after this is called the buffer is reused for the next message */
1033static int o2net_process_message(struct o2net_sock_container *sc,
1034 struct o2net_msg *hdr)
1035{
1036 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1037 int ret = 0, handler_status;
1038 enum o2net_system_error syserr;
1039 struct o2net_msg_handler *nmh = NULL;
1040
1041 msglog(hdr, "processing message\n");
1042
1043 o2net_sc_postpone_idle(sc);
1044
1045 switch(be16_to_cpu(hdr->magic)) {
1046 case O2NET_MSG_STATUS_MAGIC:
1047 /* special type for returning message status */
1048 o2net_complete_nsw(nn, NULL,
1049 be32_to_cpu(hdr->msg_num),
1050 be32_to_cpu(hdr->sys_status),
1051 be32_to_cpu(hdr->status));
1052 goto out;
1053 case O2NET_MSG_KEEP_REQ_MAGIC:
1054 o2net_sendpage(sc, o2net_keep_resp,
1055 sizeof(*o2net_keep_resp));
1056 goto out;
1057 case O2NET_MSG_KEEP_RESP_MAGIC:
1058 goto out;
1059 case O2NET_MSG_MAGIC:
1060 break;
1061 default:
1062 msglog(hdr, "bad magic\n");
1063 ret = -EINVAL;
1064 goto out;
1065 break;
1066 }
1067
1068 /* find a handler for it */
1069 handler_status = 0;
1070 nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type),
1071 be32_to_cpu(hdr->key));
1072 if (!nmh) {
1073 mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
1074 be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
1075 syserr = O2NET_ERR_NO_HNDLR;
1076 goto out_respond;
1077 }
1078
1079 syserr = O2NET_ERR_NONE;
1080
1081 if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
1082 syserr = O2NET_ERR_OVERFLOW;
1083
1084 if (syserr != O2NET_ERR_NONE)
1085 goto out_respond;
1086
1087 do_gettimeofday(&sc->sc_tv_func_start);
1088 sc->sc_msg_key = be32_to_cpu(hdr->key);
1089 sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
1090 handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
1091 be16_to_cpu(hdr->data_len),
1092 nmh->nh_func_data);
1093 do_gettimeofday(&sc->sc_tv_func_stop);
1094
1095out_respond:
1096 /* this destroys the hdr, so don't use it after this */
1097 ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
1098 handler_status);
1099 hdr = NULL;
1100 mlog(0, "sending handler status %d, syserr %d returned %d\n",
1101 handler_status, syserr, ret);
1102
1103out:
1104 if (nmh)
1105 o2net_handler_put(nmh);
1106 return ret;
1107}
1108
1109static int o2net_check_handshake(struct o2net_sock_container *sc)
1110{
1111 struct o2net_handshake *hand = page_address(sc->sc_page);
1112 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1113
1114 if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
1115 mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
1116 "version %llu but %llu is required, disconnecting\n",
1117 SC_NODEF_ARGS(sc),
1118 (unsigned long long)be64_to_cpu(hand->protocol_version),
1119 O2NET_PROTOCOL_VERSION);
1120
1121 /* don't bother reconnecting if it's the wrong version. */
1122 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1123 return -1;
1124 }
1125
1126 sc->sc_handshake_ok = 1;
1127
1128 spin_lock(&nn->nn_lock);
1129 /* set valid and queue the idle timers only if it hasn't been
1130 * shut down already */
1131 if (nn->nn_sc == sc) {
1132 o2net_sc_reset_idle_timer(sc);
1133 o2net_set_nn_state(nn, sc, 1, 0);
1134 }
1135 spin_unlock(&nn->nn_lock);
1136
1137 /* shift everything up as though it wasn't there */
1138 sc->sc_page_off -= sizeof(struct o2net_handshake);
1139 if (sc->sc_page_off)
1140 memmove(hand, hand + 1, sc->sc_page_off);
1141
1142 return 0;
1143}
1144
1145/* this demuxes the queued rx bytes into header or payload bits and calls
1146 * handlers as each full message is read off the socket. it returns -error,
1147 * == 0 eof, or > 0 for progress made.*/
1148static int o2net_advance_rx(struct o2net_sock_container *sc)
1149{
1150 struct o2net_msg *hdr;
1151 int ret = 0;
1152 void *data;
1153 size_t datalen;
1154
1155 sclog(sc, "receiving\n");
1156 do_gettimeofday(&sc->sc_tv_advance_start);
1157
1158 /* do we need more header? */
1159 if (sc->sc_page_off < sizeof(struct o2net_msg)) {
1160 data = page_address(sc->sc_page) + sc->sc_page_off;
1161 datalen = sizeof(struct o2net_msg) - sc->sc_page_off;
1162 ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1163 if (ret > 0) {
1164 sc->sc_page_off += ret;
1165
1166 /* this working relies on the handshake being
1167 * smaller than the normal message header */
1168 if (sc->sc_page_off >= sizeof(struct o2net_handshake)&&
1169 !sc->sc_handshake_ok && o2net_check_handshake(sc)) {
1170 ret = -EPROTO;
1171 goto out;
1172 }
1173
1174 /* only swab incoming here.. we can
1175 * only get here once as we cross from
1176 * being under to over */
1177 if (sc->sc_page_off == sizeof(struct o2net_msg)) {
1178 hdr = page_address(sc->sc_page);
1179 if (be16_to_cpu(hdr->data_len) >
1180 O2NET_MAX_PAYLOAD_BYTES)
1181 ret = -EOVERFLOW;
1182 }
1183 }
1184 if (ret <= 0)
1185 goto out;
1186 }
1187
1188 if (sc->sc_page_off < sizeof(struct o2net_msg)) {
1189 /* oof, still don't have a header */
1190 goto out;
1191 }
1192
1193 /* this was swabbed above when we first read it */
1194 hdr = page_address(sc->sc_page);
1195
1196 msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
1197
1198 /* do we need more payload? */
1199 if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) {
1200 /* need more payload */
1201 data = page_address(sc->sc_page) + sc->sc_page_off;
1202 datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) -
1203 sc->sc_page_off;
1204 ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1205 if (ret > 0)
1206 sc->sc_page_off += ret;
1207 if (ret <= 0)
1208 goto out;
1209 }
1210
1211 if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) {
1212 /* we can only get here once, the first time we read
1213 * the payload.. so set ret to progress if the handler
1214 * works out. after calling this the message is toast */
1215 ret = o2net_process_message(sc, hdr);
1216 if (ret == 0)
1217 ret = 1;
1218 sc->sc_page_off = 0;
1219 }
1220
1221out:
1222 sclog(sc, "ret = %d\n", ret);
1223 do_gettimeofday(&sc->sc_tv_advance_stop);
1224 return ret;
1225}
1226
1227/* this work func is triggered by data ready. it reads until it can read no
1228 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
1229 * our work the work struct will be marked and we'll be called again. */
1230static void o2net_rx_until_empty(struct work_struct *work)
1231{
1232 struct o2net_sock_container *sc =
1233 container_of(work, struct o2net_sock_container, sc_rx_work);
1234 int ret;
1235
1236 do {
1237 ret = o2net_advance_rx(sc);
1238 } while (ret > 0);
1239
1240 if (ret <= 0 && ret != -EAGAIN) {
1241 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1242 sclog(sc, "saw error %d, closing\n", ret);
1243 /* not permanent so read failed handshake can retry */
1244 o2net_ensure_shutdown(nn, sc, 0);
1245 }
1246
1247 sc_put(sc);
1248}
1249
1250static int o2net_set_nodelay(struct socket *sock)
1251{
1252 int ret, val = 1;
1253 mm_segment_t oldfs;
1254
1255 oldfs = get_fs();
1256 set_fs(KERNEL_DS);
1257
1258 /*
1259 * Dear unsuspecting programmer,
1260 *
1261 * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
1262 * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
1263 * silently turn into SO_DEBUG.
1264 *
1265 * Yours,
1266 * Keeper of hilariously fragile interfaces.
1267 */
1268 ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
1269 (char __user *)&val, sizeof(val));
1270
1271 set_fs(oldfs);
1272 return ret;
1273}
1274
1275/* ------------------------------------------------------------ */
1276
1277/* called when a connect completes and after a sock is accepted. the
1278 * rx path will see the response and mark the sc valid */
1279static void o2net_sc_connect_completed(struct work_struct *work)
1280{
1281 struct o2net_sock_container *sc =
1282 container_of(work, struct o2net_sock_container,
1283 sc_connect_work);
1284
1285 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
1286 (unsigned long long)O2NET_PROTOCOL_VERSION,
1287 (unsigned long long)be64_to_cpu(o2net_hand->connector_id));
1288
1289 o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1290 sc_put(sc);
1291}
1292
1293/* this is called as a work_struct func. */
1294static void o2net_sc_send_keep_req(struct work_struct *work)
1295{
1296 struct o2net_sock_container *sc =
1297 container_of(work, struct o2net_sock_container,
1298 sc_keepalive_work.work);
1299
1300 o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
1301 sc_put(sc);
1302}
1303
1304/* socket shutdown does a del_timer_sync against this as it tears down.
1305 * we can't start this timer until we've got to the point in sc buildup
1306 * where shutdown is going to be involved */
1307static void o2net_idle_timer(unsigned long data)
1308{
1309 struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
1310 struct timeval now;
1311
1312 do_gettimeofday(&now);
1313
1314 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1315 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1316 o2net_idle_timeout(sc->sc_node) / 1000,
1317 o2net_idle_timeout(sc->sc_node) % 1000);
1318 mlog(ML_NOTICE, "here are some times that might help debug the "
1319 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
1320 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
1321 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
1322 now.tv_sec, (long) now.tv_usec,
1323 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
1324 sc->sc_tv_advance_start.tv_sec,
1325 (long) sc->sc_tv_advance_start.tv_usec,
1326 sc->sc_tv_advance_stop.tv_sec,
1327 (long) sc->sc_tv_advance_stop.tv_usec,
1328 sc->sc_msg_key, sc->sc_msg_type,
1329 sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
1330 sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
1331
1332 o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
1333}
1334
1335static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
1336{
1337 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
1338 o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
1339 msecs_to_jiffies(o2net_keepalive_delay(sc->sc_node)));
1340 do_gettimeofday(&sc->sc_tv_timer);
1341 mod_timer(&sc->sc_idle_timeout,
1342 jiffies + msecs_to_jiffies(o2net_idle_timeout(sc->sc_node)));
1343}
1344
1345static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
1346{
1347 /* Only push out an existing timer */
1348 if (timer_pending(&sc->sc_idle_timeout))
1349 o2net_sc_reset_idle_timer(sc);
1350}
1351
1352/* this work func is kicked whenever a path sets the nn state which doesn't
1353 * have valid set. This includes seeing hb come up, losing a connection,
1354 * having a connect attempt fail, etc. This centralizes the logic which decides
1355 * if a connect attempt should be made or if we should give up and all future
1356 * transmit attempts should fail */
1357static void o2net_start_connect(struct work_struct *work)
1358{
1359 struct o2net_node *nn =
1360 container_of(work, struct o2net_node, nn_connect_work.work);
1361 struct o2net_sock_container *sc = NULL;
1362 struct o2nm_node *node = NULL, *mynode = NULL;
1363 struct socket *sock = NULL;
1364 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
1365 int ret = 0, stop;
1366
1367 /* if we're greater we initiate tx, otherwise we accept */
1368 if (o2nm_this_node() <= o2net_num_from_nn(nn))
1369 goto out;
1370
1371 /* watch for racing with tearing a node down */
1372 node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
1373 if (node == NULL) {
1374 ret = 0;
1375 goto out;
1376 }
1377
1378 mynode = o2nm_get_node_by_num(o2nm_this_node());
1379 if (mynode == NULL) {
1380 ret = 0;
1381 goto out;
1382 }
1383
1384 spin_lock(&nn->nn_lock);
1385 /* see if we already have one pending or have given up */
1386 stop = (nn->nn_sc || nn->nn_persistent_error);
1387 spin_unlock(&nn->nn_lock);
1388 if (stop)
1389 goto out;
1390
1391 nn->nn_last_connect_attempt = jiffies;
1392
1393 sc = sc_alloc(node);
1394 if (sc == NULL) {
1395 mlog(0, "couldn't allocate sc\n");
1396 ret = -ENOMEM;
1397 goto out;
1398 }
1399
1400 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1401 if (ret < 0) {
1402 mlog(0, "can't create socket: %d\n", ret);
1403 goto out;
1404 }
1405 sc->sc_sock = sock; /* freed by sc_kref_release */
1406
1407 sock->sk->sk_allocation = GFP_ATOMIC;
1408
1409 myaddr.sin_family = AF_INET;
1410 myaddr.sin_addr.s_addr = (__force u32)mynode->nd_ipv4_address;
1411 myaddr.sin_port = (__force u16)htons(0); /* any port */
1412
1413 ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
1414 sizeof(myaddr));
1415 if (ret) {
1416 mlog(ML_ERROR, "bind failed with %d at address %u.%u.%u.%u\n",
1417 ret, NIPQUAD(mynode->nd_ipv4_address));
1418 goto out;
1419 }
1420
1421 ret = o2net_set_nodelay(sc->sc_sock);
1422 if (ret) {
1423 mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
1424 goto out;
1425 }
1426
1427 o2net_register_callbacks(sc->sc_sock->sk, sc);
1428
1429 spin_lock(&nn->nn_lock);
1430 /* handshake completion will set nn->nn_sc_valid */
1431 o2net_set_nn_state(nn, sc, 0, 0);
1432 spin_unlock(&nn->nn_lock);
1433
1434 remoteaddr.sin_family = AF_INET;
1435 remoteaddr.sin_addr.s_addr = (__force u32)node->nd_ipv4_address;
1436 remoteaddr.sin_port = (__force u16)node->nd_ipv4_port;
1437
1438 ret = sc->sc_sock->ops->connect(sc->sc_sock,
1439 (struct sockaddr *)&remoteaddr,
1440 sizeof(remoteaddr),
1441 O_NONBLOCK);
1442 if (ret == -EINPROGRESS)
1443 ret = 0;
1444
1445out:
1446 if (ret) {
1447 mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
1448 "with errno %d\n", SC_NODEF_ARGS(sc), ret);
1449 /* 0 err so that another will be queued and attempted
1450 * from set_nn_state */
1451 if (sc)
1452 o2net_ensure_shutdown(nn, sc, 0);
1453 }
1454 if (sc)
1455 sc_put(sc);
1456 if (node)
1457 o2nm_node_put(node);
1458 if (mynode)
1459 o2nm_node_put(mynode);
1460
1461 return;
1462}
1463
1464static void o2net_connect_expired(struct work_struct *work)
1465{
1466 struct o2net_node *nn =
1467 container_of(work, struct o2net_node, nn_connect_expired.work);
1468
1469 spin_lock(&nn->nn_lock);
1470 if (!nn->nn_sc_valid) {
1471 struct o2nm_node *node = nn->nn_sc->sc_node;
1472 mlog(ML_ERROR, "no connection established with node %u after "
1473 "%u.%u seconds, giving up and returning errors.\n",
1474 o2net_num_from_nn(nn),
1475 o2net_idle_timeout(node) / 1000,
1476 o2net_idle_timeout(node) % 1000);
1477
1478 o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
1479 }
1480 spin_unlock(&nn->nn_lock);
1481}
1482
1483static void o2net_still_up(struct work_struct *work)
1484{
1485 struct o2net_node *nn =
1486 container_of(work, struct o2net_node, nn_still_up.work);
1487
1488 o2quo_hb_still_up(o2net_num_from_nn(nn));
1489}
1490
1491/* ------------------------------------------------------------ */
1492
1493void o2net_disconnect_node(struct o2nm_node *node)
1494{
1495 struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
1496
1497 /* don't reconnect until it's heartbeating again */
1498 spin_lock(&nn->nn_lock);
1499 o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
1500 spin_unlock(&nn->nn_lock);
1501
1502 if (o2net_wq) {
1503 cancel_delayed_work(&nn->nn_connect_expired);
1504 cancel_delayed_work(&nn->nn_connect_work);
1505 cancel_delayed_work(&nn->nn_still_up);
1506 flush_workqueue(o2net_wq);
1507 }
1508}
1509
1510static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
1511 void *data)
1512{
1513 o2quo_hb_down(node_num);
1514
1515 if (node_num != o2nm_this_node())
1516 o2net_disconnect_node(node);
1517}
1518
1519static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
1520 void *data)
1521{
1522 struct o2net_node *nn = o2net_nn_from_num(node_num);
1523
1524 o2quo_hb_up(node_num);
1525
1526 /* ensure an immediate connect attempt */
1527 nn->nn_last_connect_attempt = jiffies -
1528 (msecs_to_jiffies(o2net_reconnect_delay(node)) + 1);
1529
1530 if (node_num != o2nm_this_node()) {
1531 /* heartbeat doesn't work unless a local node number is
1532 * configured and doing so brings up the o2net_wq, so we can
1533 * use it.. */
1534 queue_delayed_work(o2net_wq, &nn->nn_connect_expired,
1535 msecs_to_jiffies(o2net_idle_timeout(node)));
1536
1537 /* believe it or not, accept and node heartbeating testing
1538 * can succeed for this node before we got here.. so
1539 * only use set_nn_state to clear the persistent error
1540 * if that hasn't already happened */
1541 spin_lock(&nn->nn_lock);
1542 if (nn->nn_persistent_error)
1543 o2net_set_nn_state(nn, NULL, 0, 0);
1544 spin_unlock(&nn->nn_lock);
1545 }
1546}
1547
1548void o2net_unregister_hb_callbacks(void)
1549{
1550 int ret;
1551
1552 ret = o2hb_unregister_callback(&o2net_hb_up);
1553 if (ret < 0)
1554 mlog(ML_ERROR, "Status return %d unregistering heartbeat up "
1555 "callback!\n", ret);
1556
1557 ret = o2hb_unregister_callback(&o2net_hb_down);
1558 if (ret < 0)
1559 mlog(ML_ERROR, "Status return %d unregistering heartbeat down "
1560 "callback!\n", ret);
1561}
1562
1563int o2net_register_hb_callbacks(void)
1564{
1565 int ret;
1566
1567 o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB,
1568 o2net_hb_node_down_cb, NULL, O2NET_HB_PRI);
1569 o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB,
1570 o2net_hb_node_up_cb, NULL, O2NET_HB_PRI);
1571
1572 ret = o2hb_register_callback(&o2net_hb_up);
1573 if (ret == 0)
1574 ret = o2hb_register_callback(&o2net_hb_down);
1575
1576 if (ret)
1577 o2net_unregister_hb_callbacks();
1578
1579 return ret;
1580}
1581
1582/* ------------------------------------------------------------ */
1583
1584static int o2net_accept_one(struct socket *sock)
1585{
1586 int ret, slen;
1587 struct sockaddr_in sin;
1588 struct socket *new_sock = NULL;
1589 struct o2nm_node *node = NULL;
1590 struct o2net_sock_container *sc = NULL;
1591 struct o2net_node *nn;
1592
1593 BUG_ON(sock == NULL);
1594 ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
1595 sock->sk->sk_protocol, &new_sock);
1596 if (ret)
1597 goto out;
1598
1599 new_sock->type = sock->type;
1600 new_sock->ops = sock->ops;
1601 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
1602 if (ret < 0)
1603 goto out;
1604
1605 new_sock->sk->sk_allocation = GFP_ATOMIC;
1606
1607 ret = o2net_set_nodelay(new_sock);
1608 if (ret) {
1609 mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
1610 goto out;
1611 }
1612
1613 slen = sizeof(sin);
1614 ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
1615 &slen, 1);
1616 if (ret < 0)
1617 goto out;
1618
1619 node = o2nm_get_node_by_ip((__force __be32)sin.sin_addr.s_addr);
1620 if (node == NULL) {
1621 mlog(ML_NOTICE, "attempt to connect from unknown node at "
1622 "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr),
1623 ntohs((__force __be16)sin.sin_port));
1624 ret = -EINVAL;
1625 goto out;
1626 }
1627
1628 if (o2nm_this_node() > node->nd_num) {
1629 mlog(ML_NOTICE, "unexpected connect attempted from a lower "
1630 "numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n",
1631 node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
1632 ntohs((__force __be16)sin.sin_port), node->nd_num);
1633 ret = -EINVAL;
1634 goto out;
1635 }
1636
1637 /* this happens all the time when the other node sees our heartbeat
1638 * and tries to connect before we see their heartbeat */
1639 if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
1640 mlog(ML_CONN, "attempt to connect from node '%s' at "
1641 "%u.%u.%u.%u:%d but it isn't heartbeating\n",
1642 node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
1643 ntohs((__force __be16)sin.sin_port));
1644 ret = -EINVAL;
1645 goto out;
1646 }
1647
1648 nn = o2net_nn_from_num(node->nd_num);
1649
1650 spin_lock(&nn->nn_lock);
1651 if (nn->nn_sc)
1652 ret = -EBUSY;
1653 else
1654 ret = 0;
1655 spin_unlock(&nn->nn_lock);
1656 if (ret) {
1657 mlog(ML_NOTICE, "attempt to connect from node '%s' at "
1658 "%u.%u.%u.%u:%d but it already has an open connection\n",
1659 node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
1660 ntohs((__force __be16)sin.sin_port));
1661 goto out;
1662 }
1663
1664 sc = sc_alloc(node);
1665 if (sc == NULL) {
1666 ret = -ENOMEM;
1667 goto out;
1668 }
1669
1670 sc->sc_sock = new_sock;
1671 new_sock = NULL;
1672
1673 spin_lock(&nn->nn_lock);
1674 o2net_set_nn_state(nn, sc, 0, 0);
1675 spin_unlock(&nn->nn_lock);
1676
1677 o2net_register_callbacks(sc->sc_sock->sk, sc);
1678 o2net_sc_queue_work(sc, &sc->sc_rx_work);
1679
1680 o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1681
1682out:
1683 if (new_sock)
1684 sock_release(new_sock);
1685 if (node)
1686 o2nm_node_put(node);
1687 if (sc)
1688 sc_put(sc);
1689 return ret;
1690}
1691
1692static void o2net_accept_many(struct work_struct *work)
1693{
1694 struct socket *sock = o2net_listen_sock;
1695 while (o2net_accept_one(sock) == 0)
1696 cond_resched();
1697}
1698
1699static void o2net_listen_data_ready(struct sock *sk, int bytes)
1700{
1701 void (*ready)(struct sock *sk, int bytes);
1702
1703 read_lock(&sk->sk_callback_lock);
1704 ready = sk->sk_user_data;
1705 if (ready == NULL) { /* check for teardown race */
1706 ready = sk->sk_data_ready;
1707 goto out;
1708 }
1709
1710 /* ->sk_data_ready is also called for a newly established child socket
1711 * before it has been accepted and the acceptor has set up their
1712 * data_ready.. we only want to queue listen work for our listening
1713 * socket */
1714 if (sk->sk_state == TCP_LISTEN) {
1715 mlog(ML_TCP, "bytes: %d\n", bytes);
1716 queue_work(o2net_wq, &o2net_listen_work);
1717 }
1718
1719out:
1720 read_unlock(&sk->sk_callback_lock);
1721 ready(sk, bytes);
1722}
1723
1724static int o2net_open_listening_sock(__be16 port)
1725{
1726 struct socket *sock = NULL;
1727 int ret;
1728 struct sockaddr_in sin = {
1729 .sin_family = PF_INET,
1730 .sin_addr = { .s_addr = (__force u32)htonl(INADDR_ANY) },
1731 .sin_port = (__force u16)port,
1732 };
1733
1734 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1735 if (ret < 0) {
1736 mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
1737 goto out;
1738 }
1739
1740 sock->sk->sk_allocation = GFP_ATOMIC;
1741
1742 write_lock_bh(&sock->sk->sk_callback_lock);
1743 sock->sk->sk_user_data = sock->sk->sk_data_ready;
1744 sock->sk->sk_data_ready = o2net_listen_data_ready;
1745 write_unlock_bh(&sock->sk->sk_callback_lock);
1746
1747 o2net_listen_sock = sock;
1748 INIT_WORK(&o2net_listen_work, o2net_accept_many);
1749
1750 sock->sk->sk_reuse = 1;
1751 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
1752 if (ret < 0) {
1753 mlog(ML_ERROR, "unable to bind socket to port %d, ret=%d\n",
1754 ntohs(port), ret);
1755 goto out;
1756 }
1757
1758 ret = sock->ops->listen(sock, 64);
1759 if (ret < 0) {
1760 mlog(ML_ERROR, "unable to listen on port %d, ret=%d\n",
1761 ntohs(port), ret);
1762 }
1763
1764out:
1765 if (ret) {
1766 o2net_listen_sock = NULL;
1767 if (sock)
1768 sock_release(sock);
1769 }
1770 return ret;
1771}
1772
1773/*
1774 * called from node manager when we should bring up our network listening
1775 * socket. node manager handles all the serialization to only call this
1776 * once and to match it with o2net_stop_listening(). note,
1777 * o2nm_this_node() doesn't work yet as we're being called while it
1778 * is being set up.
1779 */
1780int o2net_start_listening(struct o2nm_node *node)
1781{
1782 int ret = 0;
1783
1784 BUG_ON(o2net_wq != NULL);
1785 BUG_ON(o2net_listen_sock != NULL);
1786
1787 mlog(ML_KTHREAD, "starting o2net thread...\n");
1788 o2net_wq = create_singlethread_workqueue("o2net");
1789 if (o2net_wq == NULL) {
1790 mlog(ML_ERROR, "unable to launch o2net thread\n");
1791 return -ENOMEM; /* ? */
1792 }
1793
1794 ret = o2net_open_listening_sock(node->nd_ipv4_port);
1795 if (ret) {
1796 destroy_workqueue(o2net_wq);
1797 o2net_wq = NULL;
1798 } else
1799 o2quo_conn_up(node->nd_num);
1800
1801 return ret;
1802}
1803
1804/* again, o2nm_this_node() doesn't work here as we're involved in
1805 * tearing it down */
1806void o2net_stop_listening(struct o2nm_node *node)
1807{
1808 struct socket *sock = o2net_listen_sock;
1809 size_t i;
1810
1811 BUG_ON(o2net_wq == NULL);
1812 BUG_ON(o2net_listen_sock == NULL);
1813
1814 /* stop the listening socket from generating work */
1815 write_lock_bh(&sock->sk->sk_callback_lock);
1816 sock->sk->sk_data_ready = sock->sk->sk_user_data;
1817 sock->sk->sk_user_data = NULL;
1818 write_unlock_bh(&sock->sk->sk_callback_lock);
1819
1820 for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
1821 struct o2nm_node *node = o2nm_get_node_by_num(i);
1822 if (node) {
1823 o2net_disconnect_node(node);
1824 o2nm_node_put(node);
1825 }
1826 }
1827
1828 /* finish all work and tear down the work queue */
1829 mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n");
1830 destroy_workqueue(o2net_wq);
1831 o2net_wq = NULL;
1832
1833 sock_release(o2net_listen_sock);
1834 o2net_listen_sock = NULL;
1835
1836 o2quo_conn_err(node->nd_num);
1837}
1838
1839/* ------------------------------------------------------------ */
1840
1841int o2net_init(void)
1842{
1843 unsigned long i;
1844
1845 o2quo_init();
1846
1847 o2net_hand = kcalloc(1, sizeof(struct o2net_handshake), GFP_KERNEL);
1848 o2net_keep_req = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL);
1849 o2net_keep_resp = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL);
1850 if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) {
1851 kfree(o2net_hand);
1852 kfree(o2net_keep_req);
1853 kfree(o2net_keep_resp);
1854 return -ENOMEM;
1855 }
1856
1857 o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
1858 o2net_hand->connector_id = cpu_to_be64(1);
1859
1860 o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC);
1861 o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC);
1862
1863 for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
1864 struct o2net_node *nn = o2net_nn_from_num(i);
1865
1866 spin_lock_init(&nn->nn_lock);
1867 INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
1868 INIT_DELAYED_WORK(&nn->nn_connect_expired,
1869 o2net_connect_expired);
1870 INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
1871 /* until we see hb from a node we'll return -ENOTCONN */
1872 nn->nn_persistent_error = -ENOTCONN;
1873 init_waitqueue_head(&nn->nn_sc_wq);
1874 idr_init(&nn->nn_status_idr);
1875 INIT_LIST_HEAD(&nn->nn_status_list);
1876 }
1877
1878 return 0;
1879}
1880
1881void o2net_exit(void)
1882{
1883 o2quo_exit();
1884 kfree(o2net_hand);
1885 kfree(o2net_keep_req);
1886 kfree(o2net_keep_resp);
1887}