/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/configfs.h>

#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"
#include "ver.h"

/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

#define OCFS2_MAX_HB_CTL_PATH 256
static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";

static ctl_table ocfs2_nm_table[] = {
        {
                .ctl_name     = 1,
                .procname     = "hb_ctl_path",
                .data         = ocfs2_hb_ctl_path,
                .maxlen       = OCFS2_MAX_HB_CTL_PATH,
                .mode         = 0644,
                .proc_handler = &proc_dostring,
                .strategy     = &sysctl_string,
        },
        { .ctl_name = 0 }
};

static ctl_table ocfs2_mod_table[] = {
        {
                .ctl_name     = FS_OCFS2_NM,
                .procname     = "nm",
                .data         = NULL,
                .maxlen       = 0,
                .mode         = 0555,
                .child        = ocfs2_nm_table
        },
        { .ctl_name = 0 }
};

static ctl_table ocfs2_kern_table[] = {
        {
                .ctl_name     = FS_OCFS2,
                .procname     = "ocfs2",
                .data         = NULL,
                .maxlen       = 0,
                .mode         = 0555,
                .child        = ocfs2_mod_table
        },
        { .ctl_name = 0 }
};

static ctl_table ocfs2_root_table[] = {
        {
                .ctl_name     = CTL_FS,
                .procname     = "fs",
                .data         = NULL,
                .maxlen       = 0,
                .mode         = 0555,
                .child        = ocfs2_kern_table
        },
        { .ctl_name = 0 }
};

static struct ctl_table_header *ocfs2_table_header = NULL;

const char *o2nm_get_hb_ctl_path(void)
{
        return ocfs2_hb_ctl_path;
}
EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);

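/*
 * Look up a configured node by its node number.  Returns the node with a
 * config_item reference held (drop it with o2nm_node_put()), or NULL if the
 * number is out of range, no cluster is configured, or no such node exists.
 */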
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
        struct o2nm_node *node = NULL;

        if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
                goto out;

        read_lock(&o2nm_single_cluster->cl_nodes_lock);
        node = o2nm_single_cluster->cl_nodes[node_num];
        if (node)
                config_item_get(&node->nd_item);
        read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
        return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);

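/*
 * Copy the bitmap of currently configured node numbers into 'map'.  The
 * caller must supply a buffer at least as large as cl_nodes_bitmap.
 */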
int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
        struct o2nm_cluster *cluster = o2nm_single_cluster;

        BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

        if (cluster == NULL)
                return -EINVAL;

        read_lock(&cluster->cl_nodes_lock);
        memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
        read_unlock(&cluster->cl_nodes_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);

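/*
 * Walk the cluster's rbtree of nodes keyed by IPv4 address.  Returns the
 * matching node, or NULL if none is found.  When ret_p/ret_parent are
 * non-NULL they are filled with the link and parent an insertion of
 * ip_needle would use, so callers can look up and insert under one lock.
 */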
static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
                                                  __be32 ip_needle,
                                                  struct rb_node ***ret_p,
                                                  struct rb_node **ret_parent)
{
        struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
        struct rb_node *parent = NULL;
        struct o2nm_node *node, *ret = NULL;

        while (*p) {
                int cmp;

                parent = *p;
                node = rb_entry(parent, struct o2nm_node, nd_ip_node);

                cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
                             sizeof(ip_needle));
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else {
                        ret = node;
                        break;
                }
        }

        if (ret_p != NULL)
                *ret_p = p;
        if (ret_parent != NULL)
                *ret_parent = parent;

        return ret;
}

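/*
 * Look up a configured node by its IPv4 address.  Returns the node with a
 * config_item reference held, or NULL if no cluster or no match exists.
 */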
struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
        struct o2nm_node *node = NULL;
        struct o2nm_cluster *cluster = o2nm_single_cluster;

        if (cluster == NULL)
                goto out;

        read_lock(&cluster->cl_nodes_lock);
        node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
        if (node)
                config_item_get(&node->nd_item);
        read_unlock(&cluster->cl_nodes_lock);

out:
        return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);

void o2nm_node_put(struct o2nm_node *node)
{
        config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

void o2nm_node_get(struct o2nm_node *node)
{
        config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);

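/*
 * Return the node number of the node marked "local" in the configured
 * cluster, or O2NM_MAX_NODES if no local node has been set.
 */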
u8 o2nm_this_node(void)
{
        u8 node_num = O2NM_MAX_NODES;

        if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
                node_num = o2nm_single_cluster->cl_local_node;

        return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);

/* node configfs bits */

static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
        return item ?
                container_of(to_config_group(item), struct o2nm_cluster,
                             cl_group)
                : NULL;
}

static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
        return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}

static void o2nm_node_release(struct config_item *item)
{
        struct o2nm_node *node = to_o2nm_node(item);
        kfree(node);
}

static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
{
        return sprintf(page, "%d\n", node->nd_num);
}

static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
        /* through the first node_set .parent
         * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
        return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}

enum {
        O2NM_NODE_ATTR_NUM = 0,
        O2NM_NODE_ATTR_PORT,
        O2NM_NODE_ATTR_ADDRESS,
        O2NM_NODE_ATTR_LOCAL,
};

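/*
 * Parse and store a node number written to the configfs "num" attribute.
 * The address and port must already have been set, since once the node is
 * in cl_nodes[] the networking code may try to connect to it.  Fails with
 * -EEXIST if the slot for that number is already taken.
 */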
static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
                                   size_t count)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
        unsigned long tmp;
        char *p = (char *)page;

        tmp = simple_strtoul(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        if (tmp >= O2NM_MAX_NODES)
                return -ERANGE;

        /* once we're in the cl_nodes tree networking can look us up by
         * node number and try to use our address and port attributes
         * to connect to this node.. make sure that they've been set
         * before writing the node attribute? */
        if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
            !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
                return -EINVAL; /* XXX */

        write_lock(&cluster->cl_nodes_lock);
        if (cluster->cl_nodes[tmp])
                p = NULL;
        else {
                cluster->cl_nodes[tmp] = node;
                node->nd_num = tmp;
                set_bit(tmp, cluster->cl_nodes_bitmap);
        }
        write_unlock(&cluster->cl_nodes_lock);
        if (p == NULL)
                return -EEXIST;

        return count;
}

static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
{
        return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
}

static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
                                         const char *page, size_t count)
{
        unsigned long tmp;
        char *p = (char *)page;

        tmp = simple_strtoul(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        if (tmp == 0)
                return -EINVAL;
        if (tmp >= (u16)-1)
                return -ERANGE;

        node->nd_ipv4_port = htons(tmp);

        return count;
}

static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
{
        return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
}

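/*
 * Parse a dotted-quad IPv4 address, build it up as a __be32 in network byte
 * order, and insert the node into the cluster's rbtree of nodes keyed by
 * address.  Fails with -EEXIST if another node already has this address.
 */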
static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
                                            const char *page,
                                            size_t count)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
        int ret, i;
        struct rb_node **p, *parent;
        unsigned int octets[4];
        __be32 ipv4_addr = 0;

        ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
                     &octets[1], &octets[0]);
        if (ret != 4)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(octets); i++) {
                if (octets[i] > 255)
                        return -ERANGE;
                be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
        }

        ret = 0;
        write_lock(&cluster->cl_nodes_lock);
        if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
                ret = -EEXIST;
        else {
                rb_link_node(&node->nd_ip_node, parent, p);
                rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
        }
        write_unlock(&cluster->cl_nodes_lock);
        if (ret)
                return ret;

        memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

        return count;
}

static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
{
        return sprintf(page, "%d\n", node->nd_local);
}

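/*
 * Mark or unmark this node as the local node.  Setting "local" requires the
 * number, address and port to be configured already, because it starts the
 * o2net rx thread listening on that address.  Only one node per cluster may
 * be local at a time; clearing it stops the listener again.
 */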
static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
                                     size_t count)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
        unsigned long tmp;
        char *p = (char *)page;
        ssize_t ret;

        tmp = simple_strtoul(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        tmp = !!tmp; /* boolean of whether this node wants to be local */

        /* setting local turns on networking rx for now so we require having
         * set everything else first */
        if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
            !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
            !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
                return -EINVAL; /* XXX */

        /* the only failure case is trying to set a new local node
         * when a different one is already set */
        if (tmp && tmp == cluster->cl_has_local &&
            cluster->cl_local_node != node->nd_num)
                return -EBUSY;

        /* bring up the rx thread if we're setting the new local node. */
        if (tmp && !cluster->cl_has_local) {
                ret = o2net_start_listening(node);
                if (ret)
                        return ret;
        }

        if (!tmp && cluster->cl_has_local &&
            cluster->cl_local_node == node->nd_num) {
                o2net_stop_listening(node);
                cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
        }

        node->nd_local = tmp;
        if (node->nd_local) {
                cluster->cl_has_local = tmp;
                cluster->cl_local_node = node->nd_num;
        }

        return count;
}

struct o2nm_node_attribute {
        struct configfs_attribute attr;
        ssize_t (*show)(struct o2nm_node *, char *);
        ssize_t (*store)(struct o2nm_node *, const char *, size_t);
};

static struct o2nm_node_attribute o2nm_node_attr_num = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "num",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_node_num_read,
        .store  = o2nm_node_num_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "ipv4_port",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_node_ipv4_port_read,
        .store  = o2nm_node_ipv4_port_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "ipv4_address",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_node_ipv4_address_read,
        .store  = o2nm_node_ipv4_address_write,
};

static struct o2nm_node_attribute o2nm_node_attr_local = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "local",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_node_local_read,
        .store  = o2nm_node_local_write,
};

static struct configfs_attribute *o2nm_node_attrs[] = {
        [O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
        [O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
        [O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
        [O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
        NULL,
};

static int o2nm_attr_index(struct configfs_attribute *attr)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
                if (attr == o2nm_node_attrs[i])
                        return i;
        }
        BUG();
        return 0;
}

static ssize_t o2nm_node_show(struct config_item *item,
                              struct configfs_attribute *attr,
                              char *page)
{
        struct o2nm_node *node = to_o2nm_node(item);
        struct o2nm_node_attribute *o2nm_node_attr =
                container_of(attr, struct o2nm_node_attribute, attr);
        ssize_t ret = 0;

        if (o2nm_node_attr->show)
                ret = o2nm_node_attr->show(node, page);
        return ret;
}

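/*
 * configfs store dispatcher for node attributes.  Each attribute may only
 * be written once: the nd_set_attributes bit for its index is set after a
 * successful full write, and later writes are refused with -EBUSY.
 */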
static ssize_t o2nm_node_store(struct config_item *item,
                               struct configfs_attribute *attr,
                               const char *page, size_t count)
{
        struct o2nm_node *node = to_o2nm_node(item);
        struct o2nm_node_attribute *o2nm_node_attr =
                container_of(attr, struct o2nm_node_attribute, attr);
        ssize_t ret;
        int attr_index = o2nm_attr_index(attr);

        if (o2nm_node_attr->store == NULL) {
                ret = -EINVAL;
                goto out;
        }

        if (test_bit(attr_index, &node->nd_set_attributes))
                return -EBUSY;

        ret = o2nm_node_attr->store(node, page, count);
        if (ret < count)
                goto out;

        set_bit(attr_index, &node->nd_set_attributes);
out:
        return ret;
}

static struct configfs_item_operations o2nm_node_item_ops = {
        .release         = o2nm_node_release,
        .show_attribute  = o2nm_node_show,
        .store_attribute = o2nm_node_store,
};

static struct config_item_type o2nm_node_type = {
        .ct_item_ops = &o2nm_node_item_ops,
        .ct_attrs    = o2nm_node_attrs,
        .ct_owner    = THIS_MODULE,
};

/* node set */

struct o2nm_node_group {
        struct config_group ns_group;
        /* some stuff? */
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
        return group ?
                container_of(group, struct o2nm_node_group, ns_group)
                : NULL;
}
#endif

struct o2nm_cluster_attribute {
        struct configfs_attribute attr;
        ssize_t (*show)(struct o2nm_cluster *, char *);
        ssize_t (*store)(struct o2nm_cluster *, const char *, size_t);
};

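/*
 * Shared parser for the cluster timeout attributes: accept a nonzero value
 * (parsed with simple_strtoul) smaller than (u32)-1 and store it in *val,
 * otherwise return -EINVAL or -ERANGE.
 */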
static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
                                       unsigned int *val)
{
        unsigned long tmp;
        char *p = (char *)page;

        tmp = simple_strtoul(p, &p, 0);
        if (!p || (*p && (*p != '\n')))
                return -EINVAL;

        if (tmp == 0)
                return -EINVAL;
        if (tmp >= (u32)-1)
                return -ERANGE;

        *val = tmp;

        return count;
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
        struct o2nm_cluster *cluster, char *page)
{
        return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
{
        ssize_t ret;
        unsigned int val;

        ret = o2nm_cluster_attr_write(page, count, &val);

        if (ret > 0) {
                if (cluster->cl_idle_timeout_ms != val
                    && o2net_num_connected_peers()) {
                        mlog(ML_NOTICE,
                             "o2net: cannot change idle timeout after "
                             "the first peer has agreed to it."
                             " %d connected peers\n",
                             o2net_num_connected_peers());
                        ret = -EINVAL;
                } else if (val <= cluster->cl_keepalive_delay_ms) {
                        mlog(ML_NOTICE, "o2net: idle timeout must be larger "
                             "than keepalive delay\n");
                        ret = -EINVAL;
                } else {
                        cluster->cl_idle_timeout_ms = val;
                }
        }

        return ret;
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
        struct o2nm_cluster *cluster, char *page)
{
        return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
{
        ssize_t ret;
        unsigned int val;

        ret = o2nm_cluster_attr_write(page, count, &val);

        if (ret > 0) {
                if (cluster->cl_keepalive_delay_ms != val
                    && o2net_num_connected_peers()) {
                        mlog(ML_NOTICE,
                             "o2net: cannot change keepalive delay after"
                             " the first peer has agreed to it."
                             " %d connected peers\n",
                             o2net_num_connected_peers());
                        ret = -EINVAL;
                } else if (val >= cluster->cl_idle_timeout_ms) {
                        mlog(ML_NOTICE, "o2net: keepalive delay must be "
                             "smaller than idle timeout\n");
                        ret = -EINVAL;
                } else {
                        cluster->cl_keepalive_delay_ms = val;
                }
        }

        return ret;
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
        struct o2nm_cluster *cluster, char *page)
{
        return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
        struct o2nm_cluster *cluster, const char *page, size_t count)
{
        return o2nm_cluster_attr_write(page, count,
                                       &cluster->cl_reconnect_delay_ms);
}

static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "idle_timeout_ms",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_cluster_attr_idle_timeout_ms_read,
        .store  = o2nm_cluster_attr_idle_timeout_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "keepalive_delay_ms",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_cluster_attr_keepalive_delay_ms_read,
        .store  = o2nm_cluster_attr_keepalive_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "reconnect_delay_ms",
                    .ca_mode = S_IRUGO | S_IWUSR },
        .show   = o2nm_cluster_attr_reconnect_delay_ms_read,
        .store  = o2nm_cluster_attr_reconnect_delay_ms_write,
};

static struct configfs_attribute *o2nm_cluster_attrs[] = {
        &o2nm_cluster_attr_idle_timeout_ms.attr,
        &o2nm_cluster_attr_keepalive_delay_ms.attr,
        &o2nm_cluster_attr_reconnect_delay_ms.attr,
        NULL,
};

static ssize_t o2nm_cluster_show(struct config_item *item,
                                 struct configfs_attribute *attr,
                                 char *page)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster(item);
        struct o2nm_cluster_attribute *o2nm_cluster_attr =
                container_of(attr, struct o2nm_cluster_attribute, attr);
        ssize_t ret = 0;

        if (o2nm_cluster_attr->show)
                ret = o2nm_cluster_attr->show(cluster, page);
        return ret;
}

static ssize_t o2nm_cluster_store(struct config_item *item,
                                  struct configfs_attribute *attr,
                                  const char *page, size_t count)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster(item);
        struct o2nm_cluster_attribute *o2nm_cluster_attr =
                container_of(attr, struct o2nm_cluster_attribute, attr);
        ssize_t ret;

        if (o2nm_cluster_attr->store == NULL) {
                ret = -EINVAL;
                goto out;
        }

        ret = o2nm_cluster_attr->store(cluster, page, count);
        if (ret < count)
                goto out;
out:
        return ret;
}

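/*
 * configfs callback for mkdir under a cluster's "node" directory: allocate
 * a new o2nm_node and hand its config_item back to configfs.  The node's
 * fields are filled in later by writes to its attribute files.
 */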
static struct config_item *o2nm_node_group_make_item(struct config_group *group,
                                                     const char *name)
{
        struct o2nm_node *node = NULL;
        struct config_item *ret = NULL;

        if (strlen(name) > O2NM_MAX_NAME_LEN)
                goto out; /* ENAMETOOLONG */

        node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
        if (node == NULL)
                goto out; /* ENOMEM */

        strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
        config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
        spin_lock_init(&node->nd_lock);

        ret = &node->nd_item;

out:
        if (ret == NULL)
                kfree(node);

        return ret;
}

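/*
 * configfs callback for rmdir of a node: tear down its network connection,
 * stop listening if it was the local node, unlink it from the address
 * rbtree and the cl_nodes[] array, then drop the final item reference.
 */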
static void o2nm_node_group_drop_item(struct config_group *group,
                                      struct config_item *item)
{
        struct o2nm_node *node = to_o2nm_node(item);
        struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

        o2net_disconnect_node(node);

        if (cluster->cl_has_local &&
            (cluster->cl_local_node == node->nd_num)) {
                cluster->cl_has_local = 0;
                cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
                o2net_stop_listening(node);
        }

        /* XXX call into net to stop this node from trading messages */

        write_lock(&cluster->cl_nodes_lock);

        /* XXX sloppy */
        if (node->nd_ipv4_address)
                rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

        /* nd_num might be 0 if the node number hasn't been set.. */
        if (cluster->cl_nodes[node->nd_num] == node) {
                cluster->cl_nodes[node->nd_num] = NULL;
                clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
        }
        write_unlock(&cluster->cl_nodes_lock);

        config_item_put(item);
}

static struct configfs_group_operations o2nm_node_group_group_ops = {
        .make_item = o2nm_node_group_make_item,
        .drop_item = o2nm_node_group_drop_item,
};

static struct config_item_type o2nm_node_group_type = {
        .ct_group_ops = &o2nm_node_group_group_ops,
        .ct_owner     = THIS_MODULE,
};

/* cluster */

static void o2nm_cluster_release(struct config_item *item)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster(item);

        kfree(cluster->cl_group.default_groups);
        kfree(cluster);
}

static struct configfs_item_operations o2nm_cluster_item_ops = {
        .release         = o2nm_cluster_release,
        .show_attribute  = o2nm_cluster_show,
        .store_attribute = o2nm_cluster_store,
};

static struct config_item_type o2nm_cluster_type = {
        .ct_item_ops = &o2nm_cluster_item_ops,
        .ct_attrs    = o2nm_cluster_attrs,
        .ct_owner    = THIS_MODULE,
};

/* cluster set */

struct o2nm_cluster_group {
        struct configfs_subsystem cs_subsys;
        /* some stuff? */
};

#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
        return group ?
                container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
                : NULL;
}
#endif

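/*
 * configfs callback for mkdir under /config/cluster: allocate the single
 * o2nm_cluster along with its default "node" and heartbeat groups and set
 * the timeout defaults.  Only one cluster may exist at a time, so a second
 * mkdir fails while o2nm_single_cluster is set.
 */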
static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
                                                          const char *name)
{
        struct o2nm_cluster *cluster = NULL;
        struct o2nm_node_group *ns = NULL;
        struct config_group *o2hb_group = NULL, *ret = NULL;
        void *defs = NULL;

        /* this runs under the parent dir's i_mutex; there can be only
         * one caller in here at a time */
        if (o2nm_single_cluster)
                goto out; /* ENOSPC */

        cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
        ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
        defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
        o2hb_group = o2hb_alloc_hb_set();
        if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
                goto out;

        config_group_init_type_name(&cluster->cl_group, name,
                                    &o2nm_cluster_type);
        config_group_init_type_name(&ns->ns_group, "node",
                                    &o2nm_node_group_type);

        cluster->cl_group.default_groups = defs;
        cluster->cl_group.default_groups[0] = &ns->ns_group;
        cluster->cl_group.default_groups[1] = o2hb_group;
        cluster->cl_group.default_groups[2] = NULL;
        rwlock_init(&cluster->cl_nodes_lock);
        cluster->cl_node_ip_tree = RB_ROOT;
        cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
        cluster->cl_idle_timeout_ms    = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
        cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;

        ret = &cluster->cl_group;
        o2nm_single_cluster = cluster;

out:
        if (ret == NULL) {
                kfree(cluster);
                kfree(ns);
                o2hb_free_hb_set(o2hb_group);
                kfree(defs);
        }

        return ret;
}

static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
        struct o2nm_cluster *cluster = to_o2nm_cluster(item);
        int i;
        struct config_item *killme;

        BUG_ON(o2nm_single_cluster != cluster);
        o2nm_single_cluster = NULL;

        for (i = 0; cluster->cl_group.default_groups[i]; i++) {
                killme = &cluster->cl_group.default_groups[i]->cg_item;
                cluster->cl_group.default_groups[i] = NULL;
                config_item_put(killme);
        }

        config_item_put(item);
}

static struct configfs_group_operations o2nm_cluster_group_group_ops = {
        .make_group = o2nm_cluster_group_make_group,
        .drop_item  = o2nm_cluster_group_drop_item,
};

static struct config_item_type o2nm_cluster_group_type = {
        .ct_group_ops = &o2nm_cluster_group_group_ops,
        .ct_owner     = THIS_MODULE,
};

static struct o2nm_cluster_group o2nm_cluster_group = {
        .cs_subsys = {
                .su_group = {
                        .cg_item = {
                                .ci_namebuf = "cluster",
                                .ci_type = &o2nm_cluster_group_type,
                        },
                },
        },
};

int o2nm_depend_item(struct config_item *item)
{
        return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}

void o2nm_undepend_item(struct config_item *item)
{
        configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item);
}

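/*
 * Pin the local node's configfs item so it cannot be removed from userspace
 * while other cluster components depend on it.  Balanced by
 * o2nm_undepend_this_node().
 */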
int o2nm_depend_this_node(void)
{
        int ret = 0;
        struct o2nm_node *local_node;

        local_node = o2nm_get_node_by_num(o2nm_this_node());
        if (!local_node) {
                ret = -EINVAL;
                goto out;
        }

        ret = o2nm_depend_item(&local_node->nd_item);
        o2nm_node_put(local_node);

out:
        return ret;
}

void o2nm_undepend_this_node(void)
{
        struct o2nm_node *local_node;

        local_node = o2nm_get_node_by_num(o2nm_this_node());
        BUG_ON(!local_node);

        o2nm_undepend_item(&local_node->nd_item);
        o2nm_node_put(local_node);
}

static void __exit exit_o2nm(void)
{
        if (ocfs2_table_header)
                unregister_sysctl_table(ocfs2_table_header);

        /* XXX sync with hb callbacks and shut down hb? */
        o2net_unregister_hb_callbacks();
        configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
        o2cb_sys_shutdown();

        o2net_exit();
}

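/*
 * Module init: bring up heartbeat and o2net, register the ocfs2 sysctl
 * tree, the heartbeat callbacks, the "cluster" configfs subsystem and the
 * o2cb sysfs bits, unwinding in reverse order on any failure.
 */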
static int __init init_o2nm(void)
{
        int ret = -1;

        cluster_print_version();

        o2hb_init();

        ret = o2net_init();
        if (ret)
                goto out;

        ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
        if (!ocfs2_table_header) {
                printk(KERN_ERR "nodemanager: unable to register sysctl\n");
                ret = -ENOMEM; /* or something. */
                goto out_o2net;
        }

        ret = o2net_register_hb_callbacks();
        if (ret)
                goto out_sysctl;

        config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
        mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
        ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
        if (ret) {
                printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
                goto out_callbacks;
        }

        ret = o2cb_sys_init();
        if (!ret)
                goto out;

        configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
        o2net_unregister_hb_callbacks();
out_sysctl:
        unregister_sysctl_table(ocfs2_table_header);
out_o2net:
        o2net_exit();
out:
        return ret;
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(init_o2nm)
module_exit(exit_o2nm)