/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
 *
 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
 */
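
/*
 * Rough usage sketch for a data gathering module (e.g. appldata_mem or
 * appldata_os).  The field names follow the way struct appldata_ops is
 * used in this file; the callback prototype, the ctl number, the record
 * number and the data structure are purely illustrative placeholders:
 *
 *	static struct my_record my_data;	// buffer sent via DIAG 0xDC
 *
 *	static void my_callback(void *data)
 *	{
 *		// fill my_data with current values
 *	}
 *
 *	static struct appldata_ops my_ops = {
 *		.ctl_nr    = 2150,		// unique sysctl ctl_name
 *		.name      = "my_data",		// /proc/sys/appldata/my_data
 *		.record_nr = 0xD0,		// record nr. in the product ID
 *		.size      = sizeof(my_data),	// <= APPLDATA_MAX_REC_SIZE
 *		.callback  = my_callback,
 *		.data      = &my_data,
 *		.owner     = THIS_MODULE,
 *	};
 *
 *	rc = appldata_register_ops(&my_ops);	// in the module's init function
 *	...
 *	appldata_unregister_ops(&my_ops);	// in the module's exit function
 */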

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/timer.h>
//#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include "appldata.h"


#define MY_PRINT_NAME	"appldata"	/* for debug messages, etc. */
#define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
					   sampling interval in
					   milliseconds */

#define TOD_MICRO	0x01000		/* nr. of TOD clock units
					   for 1 microsecond */
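
/*
 * Background: bit 51 of the s/390 TOD clock is incremented once per
 * microsecond, i.e. 0x1000 (4096) TOD clock units correspond to one
 * microsecond.  TOD_MICRO is used below to convert the sampling
 * interval (milliseconds) into TOD clock units for the virtual timers.
 */
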
#ifndef CONFIG_ARCH_S390X

#define APPLDATA_START_INTERVAL_REC	0x00	/* Function codes for */
#define APPLDATA_STOP_REC		0x01	/* DIAG 0xDC	      */
#define APPLDATA_GEN_EVENT_RECORD	0x02
#define APPLDATA_START_CONFIG_REC	0x03

#else

#define APPLDATA_START_INTERVAL_REC	0x80
#define APPLDATA_STOP_REC		0x81
#define APPLDATA_GEN_EVENT_RECORD	0x82
#define APPLDATA_START_CONFIG_REC	0x83

#endif /* CONFIG_ARCH_S390X */


/*
 * Parameter list for DIAGNOSE X'DC'
 */
#ifndef CONFIG_ARCH_S390X
struct appldata_parameter_list {
	u16 diag;		/* The DIAGNOSE code X'00DC'		  */
	u8  function;		/* The function code for the DIAGNOSE	  */
	u8  parlist_length;	/* Length of the parameter list		  */
	u32 product_id_addr;	/* Address of the 16-byte product ID	  */
	u16 reserved;
	u16 buffer_length;	/* Length of the application data buffer  */
	u32 buffer_addr;	/* Address of the application data buffer */
};
#else
/*
 * 64 bit layout: the 31 bit address fields are left unused (unused01,
 * unused02) and the product ID and buffer addresses are appended as
 * 64 bit values.
 */
struct appldata_parameter_list {
	u16 diag;
	u8  function;
	u8  parlist_length;
	u32 unused01;
	u16 reserved;
	u16 buffer_length;
	u32 unused02;
	u64 product_id_addr;
	u64 buffer_addr;
};
#endif /* CONFIG_ARCH_S390X */

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
				     struct file *filp,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
	{
		.ctl_name	= CTL_APPLDATA_TIMER,
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_timer_handler,
	},
	{
		.ctl_name	= CTL_APPLDATA_INTERVAL,
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_interval_handler,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table appldata_dir_table[] = {
	{
		.ctl_name	= CTL_APPLDATA,
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ .ctl_name = 0 }
};
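
/*
 * The tables above show up as /proc/sys/appldata/timer and
 * /proc/sys/appldata/interval.  Writing '1' or '0' to "timer" starts or
 * stops the sampling timer, writing a number of milliseconds to
 * "interval" changes the sampling interval, e.g. (illustrative):
 *
 *	echo 1 > /proc/sys/appldata/timer
 *	echo 5000 > /proc/sys/appldata/interval
 */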

/*
 * Timer
 */
DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;

/*
 * Tasklet
 */
static struct tasklet_struct appldata_tasklet_struct;

/*
 * Ops list
 */
static DEFINE_SPINLOCK(appldata_ops_lock);
static LIST_HEAD(appldata_ops_list);


/************************* timer, tasklet, DIAG ******************************/
/*
 * appldata_timer_function()
 *
 * schedule tasklet and reschedule timer
 */
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
{
	P_DEBUG(" -= Timer =-\n");
	P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
		atomic_read(&appldata_expire_count));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		tasklet_schedule((struct tasklet_struct *) data);
	}
}
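
/*
 * The sampling interval is spread over all online CPUs: every CPU runs a
 * periodic virtual timer with an expiry of appldata_interval divided by
 * num_online_cpus().  appldata_expire_count starts at num_online_cpus();
 * each expiring timer decrements it, and only the timer that brings it
 * down to zero resets the counter and schedules the data gathering
 * tasklet.  With the default 10000 ms interval and 4 online CPUs, for
 * example, each per-CPU timer is set to 2500 ms and the tasklet is
 * scheduled once per 10000 ms worth of expirations.
 */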

/*
 * appldata_tasklet_function()
 *
 * call data gathering function for each (active) module
 */
static void appldata_tasklet_function(unsigned long data)
{
	struct list_head *lh;
	struct appldata_ops *ops;
	int i;

	P_DEBUG(" -= Tasklet =-\n");
	i = 0;
	spin_lock(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
			++i, ops->active, ops->name);
		if (ops->active == 1) {
			ops->callback(ops->data);
		}
	}
	spin_unlock(&appldata_ops_lock);
}

/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
			 u16 length)
{
	unsigned long ry;
	struct appldata_product_id {
		char prod_nr[7];	/* product nr. */
		char prod_fn[2];	/* product function */
		char record_nr;		/* record nr. */
		char version_nr[2];	/* version */
		char release_nr[2];	/* release */
		char mod_lvl[2];	/* modification lvl. */
	} appldata_product_id = {
		/* all strings are EBCDIC, record_nr is byte */
		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
			       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
		.prod_fn    = {0xD5, 0xD3},		/* "NL" */
		.record_nr  = record_nr,
		.version_nr = {0xF2, 0xF6},		/* "26" */
		.release_nr = {0xF0, 0xF1},		/* "01" */
		.mod_lvl    = {0xF0, 0xF0},		/* "00" */
	};
	struct appldata_parameter_list appldata_parameter_list = {
		.diag		 = 0xDC,
		.function	 = function,
		.parlist_length	 = sizeof(appldata_parameter_list),
		.buffer_length	 = length,
		.product_id_addr = (unsigned long) &appldata_product_id,
		.buffer_addr	 = virt_to_phys((void *) buffer)
	};

	if (!MACHINE_IS_VM)
		return -ENOSYS;
	ry = -1;
	asm volatile(
			"diag %1,%0,0xDC\n\t"
			: "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc");
	return (int) ry;
}
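
/*
 * Note: appldata_diag() returns -ENOSYS when not running under z/VM
 * (DIAG 0xDC is a z/VM service); otherwise the return code of the
 * DIAGNOSE itself is passed back, with 0 meaning success (see the rc
 * checks in appldata_generic_handler() and appldata_exit()).
 */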
/********************** timer, tasklet, DIAG <END> ***************************/


/****************************** /proc stuff **********************************/

/*
 * appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer(), because smp_call_function_on()
 * accepts only one parameter.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
	struct {
		struct vtimer_list *timer;
		u64 expires;
	} *args = p;
	mod_virt_timer(args->timer, args->expires);
}

#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void
__appldata_vtimer_setup(int cmd)
{
	u64 per_cpu_interval;
	int i;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		for_each_online_cpu(i) {
			per_cpu(appldata_timer, i).expires = per_cpu_interval;
			smp_call_function_on(add_virt_timer_periodic,
					     &per_cpu(appldata_timer, i),
					     0, 1, i);
		}
		appldata_timer_active = 1;
		P_INFO("Monitoring timer started.\n");
		break;
	case APPLDATA_DEL_TIMER:
		for_each_online_cpu(i)
			del_virt_timer(&per_cpu(appldata_timer, i));
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		atomic_set(&appldata_expire_count, num_online_cpus());
		P_INFO("Monitoring timer stopped.\n");
		break;
	case APPLDATA_MOD_TIMER:
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		if (!appldata_timer_active)
			break;
		for_each_online_cpu(i) {
			struct {
				struct vtimer_list *timer;
				u64 expires;
			} args;
			args.timer = &per_cpu(appldata_timer, i);
			args.expires = per_cpu_interval;
			smp_call_function_on(__appldata_mod_vtimer_wrap,
					     &args, 0, 1, i);
		}
	}
}
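
/*
 * Worked example of the per-CPU expiry computation above: with the
 * default appldata_interval of 10000 ms and 4 online CPUs,
 * per_cpu_interval = (10000 * 1000 / 4) * TOD_MICRO = 2500000 * 0x1000
 * TOD clock units, i.e. 2.5 seconds of CPU time per CPU between
 * expirations.
 */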

/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len;
	char buf[3];	/* "0\n" or "1\n" plus the terminating NUL of sprintf() */

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	spin_lock(&appldata_timer_lock);
	if (buf[0] == '1')
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else if (buf[0] == '0')
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, interval;
	char buf[16];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, "%i\n", appldata_interval);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) - 1 ? sizeof(buf) - 1 : len)) {
		return -EFAULT;
	}
	/* make sure sscanf() sees a NUL terminated string */
	buf[len > sizeof(buf) - 1 ? sizeof(buf) - 1 : len] = '\0';
	interval = 0;	/* in case sscanf() does not find a number */
	sscanf(buf, "%i", &interval);
	if (interval <= 0) {
		P_ERROR("Timer CPU interval has to be > 0!\n");
		return -EINVAL;
	}

	spin_lock(&appldata_timer_lock);
	appldata_interval = interval;
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);

	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
		interval);
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	int rc, len, found;
	char buf[3];	/* "0\n" or "1\n" plus the terminating NUL of sprintf() */
	struct list_head *lh;

	found = 0;
	spin_lock_bh(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[2] == ctl) {
			found = 1;
		}
	}
	if (!found) {
		spin_unlock_bh(&appldata_ops_lock);
		return -ENODEV;
	}
	ops = ctl->data;
	if (!try_module_get(ops->owner)) {	// protect this function
		spin_unlock_bh(&appldata_ops_lock);
		return -ENODEV;
	}
	spin_unlock_bh(&appldata_ops_lock);

	if (!*lenp || *ppos) {
		*lenp = 0;
		module_put(ops->owner);
		return 0;
	}
	if (!write) {
		len = sprintf(buf, ops->active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len)) {
			module_put(ops->owner);
			return -EFAULT;
		}
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len)) {
		module_put(ops->owner);
		return -EFAULT;
	}

	spin_lock_bh(&appldata_ops_lock);
	if ((buf[0] == '1') && (ops->active == 0)) {
		if (!try_module_get(ops->owner)) {	// protect tasklet
			spin_unlock_bh(&appldata_ops_lock);
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->active = 1;
		ops->callback(ops->data);	// init record
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size);
		if (rc != 0) {
			P_ERROR("START DIAG 0xDC for %s failed, "
				"return code: %d\n", ops->name, rc);
			module_put(ops->owner);
			ops->active = 0;
		} else {
			P_INFO("Monitoring %s data enabled, "
			       "DIAG 0xDC started.\n", ops->name);
		}
	} else if ((buf[0] == '0') && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size);
		if (rc != 0) {
			P_ERROR("STOP DIAG 0xDC for %s failed, "
				"return code: %d\n", ops->name, rc);
		} else {
			P_INFO("Monitoring %s data disabled, "
			       "DIAG 0xDC stopped.\n", ops->name);
		}
		module_put(ops->owner);
	}
	spin_unlock_bh(&appldata_ops_lock);
out:
	*lenp = len;
	*ppos += len;
	module_put(ops->owner);
	return 0;
}
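
/*
 * For each registered gathering module, writing '1' to its sysctl file
 * starts interval records for that record type via DIAG 0xDC
 * (APPLDATA_START_INTERVAL_REC), writing '0' stops them
 * (APPLDATA_STOP_REC); reading the file shows whether monitoring is
 * currently active.
 */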

/*************************** /proc stuff <END> *******************************/


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
	struct list_head *lh;
	struct appldata_ops *tmp_ops;
	int i;

	i = 0;

	if ((ops->size > APPLDATA_MAX_REC_SIZE) ||
	    (ops->size < 0)) {
		P_ERROR("Invalid size of %s record = %i, maximum = %i!\n",
			ops->name, ops->size, APPLDATA_MAX_REC_SIZE);
		return -ENOMEM;
	}
	if ((ops->ctl_nr == CTL_APPLDATA) ||
	    (ops->ctl_nr == CTL_APPLDATA_TIMER) ||
	    (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) {
		P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
		return -EBUSY;
	}
	ops->ctl_table = kmalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
	if (ops->ctl_table == NULL) {
		P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
		return -ENOMEM;
	}
	memset(ops->ctl_table, 0, 4 * sizeof(struct ctl_table));

	spin_lock_bh(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
			++i, tmp_ops->name, tmp_ops->ctl_nr);
		P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n",
			tmp_ops->name, tmp_ops->ctl_nr, ops->name,
			ops->ctl_nr);
		if (strncmp(tmp_ops->name, ops->name,
			    APPLDATA_PROC_NAME_LENGTH) == 0) {
			P_ERROR("Name \"%s\" already registered!\n", ops->name);
			kfree(ops->ctl_table);
			spin_unlock_bh(&appldata_ops_lock);
			return -EBUSY;
		}
		if (tmp_ops->ctl_nr == ops->ctl_nr) {
			P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
			kfree(ops->ctl_table);
			spin_unlock_bh(&appldata_ops_lock);
			return -EBUSY;
		}
	}
	list_add(&ops->list, &appldata_ops_list);
	spin_unlock_bh(&appldata_ops_lock);

	ops->ctl_table[0].ctl_name = CTL_APPLDATA;
	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen   = 0;
	ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child    = &ops->ctl_table[2];

	ops->ctl_table[1].ctl_name = 0;

	ops->ctl_table[2].ctl_name = ops->ctl_nr;
	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode     = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	ops->ctl_table[2].data = ops;

	ops->ctl_table[3].ctl_name = 0;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table, 1);

	P_INFO("%s-ops registered!\n", ops->name);
	return 0;
}
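
/*
 * The four ctl_table entries built above form a second sysctl tree: the
 * shared "appldata" directory (entry 0) is the parent and the module's
 * own entry (entry 2, named after ops->name) is the child, so each
 * registered module appears as /proc/sys/appldata/<name> and is handled
 * by appldata_generic_handler().  Entries 1 and 3 terminate the two
 * tables.
 */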

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	spin_lock_bh(&appldata_ops_lock);
	unregister_sysctl_table(ops->sysctl_header);
	list_del(&ops->list);
	kfree(ops->ctl_table);
	ops->ctl_table = NULL;
	spin_unlock_bh(&appldata_ops_lock);
	P_INFO("%s-ops unregistered!\n", ops->name);
}
/********************** module-ops management <END> **************************/


/******************************* init / exit *********************************/

static void
appldata_online_cpu(int cpu)
{
	init_virt_timer(&per_cpu(appldata_timer, cpu));
	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
	per_cpu(appldata_timer, cpu).data = (unsigned long)
		&appldata_tasklet_struct;
	atomic_inc(&appldata_expire_count);
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

static void
appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		tasklet_schedule(&appldata_tasklet_struct);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

static int
appldata_cpu_notify(struct notifier_block *self,
		    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
		appldata_online_cpu((long) hcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		appldata_offline_cpu((long) hcpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};

/*
 * appldata_init()
 *
 * init timer and tasklet, register /proc entries
 */
static int __init appldata_init(void)
{
	int i;

	P_DEBUG("sizeof(parameter_list) = %lu\n",
		sizeof(struct appldata_parameter_list));

	for_each_online_cpu(i)
		appldata_online_cpu(i);

	/* Register cpu hotplug notifier */
	register_cpu_notifier(&appldata_nb);

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
	appldata_dir_table[0].de->owner = THIS_MODULE;
	appldata_table[0].de->owner = THIS_MODULE;
	appldata_table[1].de->owner = THIS_MODULE;
#endif

	tasklet_init(&appldata_tasklet_struct, appldata_tasklet_function, 0);
	P_DEBUG("Base interface initialized.\n");
	return 0;
}

/*
 * appldata_exit()
 *
 * stop timer and tasklet, unregister /proc entries
 */
static void __exit appldata_exit(void)
{
	struct list_head *lh;
	struct appldata_ops *ops;
	int rc, i;

	P_DEBUG("Unloading module ...\n");
	/*
	 * ops list should be empty, but just in case something went wrong...
	 */
	spin_lock_bh(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size);
		if (rc != 0) {
			P_ERROR("STOP DIAG 0xDC for %s failed, "
				"return code: %d\n", ops->name, rc);
		}
	}
	spin_unlock_bh(&appldata_ops_lock);

	for_each_online_cpu(i)
		appldata_offline_cpu(i);

	appldata_timer_active = 0;

	unregister_sysctl_table(appldata_sysctl_header);

	tasklet_kill(&appldata_tasklet_struct);
	P_DEBUG("... module unloaded!\n");
}
/**************************** init / exit <END> ******************************/


module_init(appldata_init);
module_exit(appldata_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);

#ifdef MODULE
/*
 * Kernel symbols needed by the appldata_mem and appldata_os modules.
 * When this file is compiled as a module (for testing only), the kernel
 * does not export these symbols to it, so we define dummy versions
 * locally and export those instead.
 */
void si_swapinfo(struct sysinfo *val)
{
	val->freeswap = -1ul;
	val->totalswap = -1ul;
}

unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
			    -1 - FIXED_1/200};
int nr_threads = -1;

void get_full_page_state(struct page_state *ps)
{
	memset(ps, -1, sizeof(struct page_state));
}

unsigned long nr_running(void)
{
	return -1;
}

unsigned long nr_iowait(void)
{
	return -1;
}

/*unsigned long nr_context_switches(void)
{
	return -1;
}*/
#endif /* MODULE */
EXPORT_SYMBOL_GPL(si_swapinfo);
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(avenrun);
EXPORT_SYMBOL_GPL(get_full_page_state);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
//EXPORT_SYMBOL_GPL(nr_context_switches);