/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/configfs.h>

#include "endian.h"
#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"
#include "ver.h"

/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

#define OCFS2_MAX_HB_CTL_PATH 256
static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";

static ctl_table ocfs2_nm_table[] = {
	{
		.ctl_name	= 1,
		.procname	= "hb_ctl_path",
		.data		= ocfs2_hb_ctl_path,
		.maxlen		= OCFS2_MAX_HB_CTL_PATH,
		.mode		= 0644,
		.proc_handler	= &proc_dostring,
		.strategy	= &sysctl_string,
	},
	{ .ctl_name = 0 }
};

static ctl_table ocfs2_mod_table[] = {
	{
		.ctl_name	= FS_OCFS2_NM,
		.procname	= "nm",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0555,
		.child		= ocfs2_nm_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ocfs2_kern_table[] = {
	{
		.ctl_name	= FS_OCFS2,
		.procname	= "ocfs2",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0555,
		.child		= ocfs2_mod_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ocfs2_root_table[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0555,
		.child		= ocfs2_kern_table
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *ocfs2_table_header = NULL;

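/* The table chain above surfaces a single tunable,
 * /proc/sys/fs/ocfs2/nm/hb_ctl_path, letting userspace point the stack at
 * an alternate heartbeat control binary.  o2nm_get_hb_ctl_path() below just
 * hands that string back; callers elsewhere in ocfs2 use it when spawning
 * the userspace heartbeat control tool. */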
const char *o2nm_get_hb_ctl_path(void)
{
	return ocfs2_hb_ctl_path;
}
EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);

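/* Node lookup helpers.  Both o2nm_get_node_by_num() and
 * o2nm_get_node_by_ip() return the node with a configfs reference held
 * (config_item_get()); callers drop it with o2nm_node_put() when done. */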
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
	struct o2nm_node *node = NULL;

	if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
		goto out;

	read_lock(&o2nm_single_cluster->cl_nodes_lock);
	node = o2nm_single_cluster->cl_nodes[node_num];
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);

int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

	if (cluster == NULL)
		return -EINVAL;

	read_lock(&cluster->cl_nodes_lock);
	memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
	read_unlock(&cluster->cl_nodes_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);

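/* Nodes are indexed two ways: by node number in cl_nodes[] and by IPv4
 * address in the cl_node_ip_tree rbtree.  The tree walk below doubles as
 * the insertion helper: it hands back the link and parent pointers so a
 * new node can be spliced in, and a non-NULL return means the address is
 * already taken. */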
static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
						  __be32 ip_needle,
						  struct rb_node ***ret_p,
						  struct rb_node **ret_parent)
{
	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
	struct rb_node *parent = NULL;
	struct o2nm_node *node, *ret = NULL;

	while (*p) {
		int cmp;

		parent = *p;
		node = rb_entry(parent, struct o2nm_node, nd_ip_node);

		cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
			     sizeof(ip_needle));
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else {
			ret = node;
			break;
		}
	}

	if (ret_p != NULL)
		*ret_p = p;
	if (ret_parent != NULL)
		*ret_parent = parent;

	return ret;
}

struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
	struct o2nm_node *node = NULL;
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	if (cluster == NULL)
		goto out;

	read_lock(&cluster->cl_nodes_lock);
	node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&cluster->cl_nodes_lock);

out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);

void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);

u8 o2nm_this_node(void)
{
	u8 node_num = O2NM_MAX_NODES;

	if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
		node_num = o2nm_single_cluster->cl_local_node;

	return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);

/* node configfs bits */
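/*
 * Everything from here down implements the configfs hierarchy that
 * userspace (typically the o2cb init script from ocfs2-tools) drives.
 * As an illustrative sketch only -- the mount point and tool behaviour
 * are assumptions, not enforced by this file -- with configfs mounted
 * at /sys/kernel/config the flow looks roughly like:
 *
 *	mkdir /sys/kernel/config/cluster/mycluster
 *	mkdir /sys/kernel/config/cluster/mycluster/node/node0
 *	echo 192.168.1.10 > .../node/node0/ipv4_address
 *	echo 7777         > .../node/node0/ipv4_port
 *	echo 0            > .../node/node0/num
 *	echo 1            > .../node/node0/local
 *
 * mkdir/rmdir on those directories end up in the make_item/make_group
 * and drop_item callbacks further down in this file, and the attribute
 * reads and writes land in the show/store handlers below.
 */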

static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
	return item ?
		container_of(to_config_group(item), struct o2nm_cluster,
			     cl_group)
		: NULL;
}

static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
	return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}

static void o2nm_node_release(struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	kfree(node);
}

static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_num);
}

static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
	/* through the first node_set .parent
	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}

enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
	O2NM_NODE_ATTR_LOCAL,
};
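
/* These enum values double as bit numbers in node->nd_set_attributes.
 * o2nm_node_store() sets the matching bit after a successful write and
 * rejects a second write with -EBUSY, so each attribute is effectively
 * write-once for the lifetime of the node item. */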

static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
				   size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		p = NULL;
	else {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (p == NULL)
		return -EEXIST;

	return count;
}

static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
}

static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
					 const char *page, size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u16)-1)
		return -ERANGE;

	node->nd_ipv4_port = htons(tmp);

	return count;
}

static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
}

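/* Writing the address parses a dotted quad, builds the value in network
 * byte order, and links the node into the cluster's IP rbtree under
 * cl_nodes_lock; a second node claiming the same address is rejected
 * with -EEXIST. */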
static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
					    const char *page,
					    size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else {
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}

static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_local);
}

static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
				     size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num)
		return -EBUSY;

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			return ret;
	}

	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	return count;
}

struct o2nm_node_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_node *, char *);
	ssize_t (*store)(struct o2nm_node *, const char *, size_t);
};

static struct o2nm_node_attribute o2nm_node_attr_num = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "num",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_num_read,
	.store	= o2nm_node_num_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_port",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_port_read,
	.store	= o2nm_node_ipv4_port_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_address",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_address_read,
	.store	= o2nm_node_ipv4_address_write,
};

static struct o2nm_node_attribute o2nm_node_attr_local = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "local",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_local_read,
	.store	= o2nm_node_local_write,
};

static struct configfs_attribute *o2nm_node_attrs[] = {
	[O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
	[O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
	[O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
	[O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
	NULL,
};

static int o2nm_attr_index(struct configfs_attribute *attr)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
		if (attr == o2nm_node_attrs[i])
			return i;
	}
	BUG();
	return 0;
}

static ssize_t o2nm_node_show(struct config_item *item,
			      struct configfs_attribute *attr,
			      char *page)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_node_attribute *o2nm_node_attr =
		container_of(attr, struct o2nm_node_attribute, attr);
	ssize_t ret = 0;

	if (o2nm_node_attr->show)
		ret = o2nm_node_attr->show(node, page);
	return ret;
}

static ssize_t o2nm_node_store(struct config_item *item,
			       struct configfs_attribute *attr,
			       const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_node_attribute *o2nm_node_attr =
		container_of(attr, struct o2nm_node_attribute, attr);
	ssize_t ret;
	int attr_index = o2nm_attr_index(attr);

	if (o2nm_node_attr->store == NULL) {
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(attr_index, &node->nd_set_attributes))
		return -EBUSY;

	ret = o2nm_node_attr->store(node, page, count);
	if (ret < count)
		goto out;

	set_bit(attr_index, &node->nd_set_attributes);
out:
	return ret;
}

static struct configfs_item_operations o2nm_node_item_ops = {
	.release		= o2nm_node_release,
	.show_attribute		= o2nm_node_show,
	.store_attribute	= o2nm_node_store,
};

static struct config_item_type o2nm_node_type = {
	.ct_item_ops	= &o2nm_node_item_ops,
	.ct_attrs	= o2nm_node_attrs,
	.ct_owner	= THIS_MODULE,
};

/* node set */

struct o2nm_node_group {
	struct config_group ns_group;
	/* some stuff? */
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif

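/* Cluster-level attributes are o2net timing tunables.  The write handlers
 * below refuse to change the idle timeout or keepalive delay once any
 * peer connection exists, since both ends of a connection must agree on
 * those values. */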
struct o2nm_cluster_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_cluster *, char *);
	ssize_t (*store)(struct o2nm_cluster *, const char *, size_t);
};

static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
				       unsigned int *val)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u32)-1)
		return -ERANGE;

	*val = tmp;

	return count;
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_idle_timeout_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change idle timeout after "
			     "the first peer has agreed to it."
			     " %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val <= cluster->cl_keepalive_delay_ms) {
			mlog(ML_NOTICE, "o2net: idle timeout must be larger "
			     "than keepalive delay\n");
			ret = -EINVAL;
		} else {
			cluster->cl_idle_timeout_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_keepalive_delay_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change keepalive delay after"
			     " the first peer has agreed to it."
			     " %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val >= cluster->cl_idle_timeout_ms) {
			mlog(ML_NOTICE, "o2net: keepalive delay must be "
			     "smaller than idle timeout\n");
			ret = -EINVAL;
		} else {
			cluster->cl_keepalive_delay_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
				       &cluster->cl_reconnect_delay_ms);
}

static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "idle_timeout_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_idle_timeout_ms_read,
	.store	= o2nm_cluster_attr_idle_timeout_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "keepalive_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_keepalive_delay_ms_read,
	.store	= o2nm_cluster_attr_keepalive_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "reconnect_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_reconnect_delay_ms_read,
	.store	= o2nm_cluster_attr_reconnect_delay_ms_write,
};

static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms.attr,
	&o2nm_cluster_attr_keepalive_delay_ms.attr,
	&o2nm_cluster_attr_reconnect_delay_ms.attr,
	NULL,
};

static ssize_t o2nm_cluster_show(struct config_item *item,
				 struct configfs_attribute *attr,
				 char *page)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	struct o2nm_cluster_attribute *o2nm_cluster_attr =
		container_of(attr, struct o2nm_cluster_attribute, attr);
	ssize_t ret = 0;

	if (o2nm_cluster_attr->show)
		ret = o2nm_cluster_attr->show(cluster, page);
	return ret;
}

static ssize_t o2nm_cluster_store(struct config_item *item,
				  struct configfs_attribute *attr,
				  const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	struct o2nm_cluster_attribute *o2nm_cluster_attr =
		container_of(attr, struct o2nm_cluster_attribute, attr);
	ssize_t ret;

	if (o2nm_cluster_attr->store == NULL) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_cluster_attr->store(cluster, page, count);
	if (ret < count)
		goto out;
out:
	return ret;
}

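/* These group operations back the "node" directory: mkdir of a node name
 * allocates an o2nm_node, and rmdir tears it down, unhooking it from the
 * node array, the IP tree, and (if it was the local node) the o2net
 * listening socket. */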
static struct config_item *o2nm_node_group_make_item(struct config_group *group,
						     const char *name)
{
	struct o2nm_node *node = NULL;
	struct config_item *ret = NULL;

	if (strlen(name) > O2NM_MAX_NAME_LEN)
		goto out; /* ENAMETOOLONG */

	node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
	if (node == NULL)
		goto out; /* ENOMEM */

	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
	config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
	spin_lock_init(&node->nd_lock);

	ret = &node->nd_item;

out:
	if (ret == NULL)
		kfree(node);

	return ret;
}

static void o2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

	o2net_disconnect_node(node);

	if (cluster->cl_has_local &&
	    (cluster->cl_local_node == node->nd_num)) {
		cluster->cl_has_local = 0;
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
		o2net_stop_listening(node);
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	/* XXX sloppy */
	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	config_item_put(item);
}

static struct configfs_group_operations o2nm_node_group_group_ops = {
	.make_item	= o2nm_node_group_make_item,
	.drop_item	= o2nm_node_group_drop_item,
};

static struct config_item_type o2nm_node_group_type = {
	.ct_group_ops	= &o2nm_node_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

/* cluster */

static void o2nm_cluster_release(struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	kfree(cluster->cl_group.default_groups);
	kfree(cluster);
}

static struct configfs_item_operations o2nm_cluster_item_ops = {
	.release		= o2nm_cluster_release,
	.show_attribute		= o2nm_cluster_show,
	.store_attribute	= o2nm_cluster_store,
};

static struct config_item_type o2nm_cluster_type = {
	.ct_item_ops	= &o2nm_cluster_item_ops,
	.ct_attrs	= o2nm_cluster_attrs,
	.ct_owner	= THIS_MODULE,
};

/* cluster set */

struct o2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
	/* some stuff? */
};

#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
		: NULL;
}
#endif

static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
							   const char *name)
{
	struct o2nm_cluster *cluster = NULL;
	struct o2nm_node_group *ns = NULL;
	struct config_group *o2hb_group = NULL, *ret = NULL;
	void *defs = NULL;

	/* this runs under the parent dir's i_mutex; there can be only
	 * one caller in here at a time */
	if (o2nm_single_cluster)
		goto out; /* ENOSPC */

	cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
	defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
	o2hb_group = o2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &o2nm_cluster_type);
	config_group_init_type_name(&ns->ns_group, "node",
				    &o2nm_node_group_type);

	cluster->cl_group.default_groups = defs;
	cluster->cl_group.default_groups[0] = &ns->ns_group;
	cluster->cl_group.default_groups[1] = o2hb_group;
	cluster->cl_group.default_groups[2] = NULL;
	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;

	ret = &cluster->cl_group;
	o2nm_single_cluster = cluster;

out:
	if (ret == NULL) {
		kfree(cluster);
		kfree(ns);
		o2hb_free_hb_set(o2hb_group);
		kfree(defs);
	}

	return ret;
}

static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	int i;
	struct config_item *killme;

	BUG_ON(o2nm_single_cluster != cluster);
	o2nm_single_cluster = NULL;

	for (i = 0; cluster->cl_group.default_groups[i]; i++) {
		killme = &cluster->cl_group.default_groups[i]->cg_item;
		cluster->cl_group.default_groups[i] = NULL;
		config_item_put(killme);
	}

	config_item_put(item);
}

static struct configfs_group_operations o2nm_cluster_group_group_ops = {
	.make_group	= o2nm_cluster_group_make_group,
	.drop_item	= o2nm_cluster_group_drop_item,
};

static struct config_item_type o2nm_cluster_group_type = {
	.ct_group_ops	= &o2nm_cluster_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct o2nm_cluster_group o2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &o2nm_cluster_group_type,
			},
		},
	},
};

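/* configfs dependency wrappers: taking a dependency on an item pins it so
 * userspace cannot rmdir it away while kernel code (for example a mounted
 * filesystem relying on the local node) still needs it. */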
int o2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}

void o2nm_undepend_item(struct config_item *item)
{
	configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item);
}

int o2nm_depend_this_node(void)
{
	int ret = 0;
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	if (!local_node) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_depend_item(&local_node->nd_item);
	o2nm_node_put(local_node);

out:
	return ret;
}

void o2nm_undepend_this_node(void)
{
	struct o2nm_node *local_node;

	local_node = o2nm_get_node_by_num(o2nm_this_node());
	BUG_ON(!local_node);

	o2nm_undepend_item(&local_node->nd_item);
	o2nm_node_put(local_node);
}


static void __exit exit_o2nm(void)
{
	if (ocfs2_table_header)
		unregister_sysctl_table(ocfs2_table_header);

	/* XXX sync with hb callbacks and shut down hb? */
	o2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
	o2cb_sys_shutdown();

	o2net_exit();
}
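
/* Module init brings the pieces up in dependency order: heartbeat and
 * o2net first, then the sysctl table, the heartbeat callbacks, the
 * configfs "cluster" subsystem, and finally the o2cb sysfs bits; the
 * error labels below unwind that sequence in reverse. */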

static int __init init_o2nm(void)
{
	int ret = -1;

	cluster_print_version();

	o2hb_init();
	o2net_init();

	ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
	if (!ocfs2_table_header) {
		printk(KERN_ERR "nodemanager: unable to register sysctl\n");
		ret = -ENOMEM; /* or something. */
		goto out_o2net;
	}

	ret = o2net_register_hb_callbacks();
	if (ret)
		goto out_sysctl;

	config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
	mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
	ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
	if (ret) {
		printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
		goto out_callbacks;
	}

	ret = o2cb_sys_init();
	if (!ret)
		goto out;

	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
	o2net_unregister_hb_callbacks();
out_sysctl:
	unregister_sysctl_table(ocfs2_table_header);
out_o2net:
	o2net_exit();
out:
	return ret;
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(init_o2nm)
module_exit(exit_o2nm)