#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

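/*
 * Per-CPU CPU time accounting buckets.  Each field accumulates the
 * cputime64_t ticks spent in one class of execution (user, nice,
 * system, idle, iowait, ...); these are the values exported per CPU
 * line in /proc/stat.
 */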
struct cpu_usage_stat {
	cputime64_t user;
	cputime64_t nice;
	cputime64_t system;
	cputime64_t softirq;
	cputime64_t irq;
	cputime64_t idle;
	cputime64_t iowait;
	cputime64_t steal;
	cputime64_t guest;
	cputime64_t guest_nice;
};

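/*
 * Per-CPU kernel statistics: the CPU time buckets above plus interrupt
 * counters.  irqs_sum is the total number of interrupts handled on the
 * CPU since boot; softirqs[] counts invocations of each softirq vector.
 */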
struct kernel_stat {
	struct cpu_usage_stat	cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

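/* Statistics for a given CPU; may be read from any CPU. */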
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu __get_cpu_var(kstat)

extern unsigned long long nr_context_switches(void);

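/*
 * Per-IRQ interrupt counters.  Without CONFIG_GENERIC_HARDIRQS they live
 * in the per-CPU kernel_stat.irqs[] array; with it they are kept in the
 * irq_desc itself.  kstat_incr_irqs_this_cpu() is called by the interrupt
 * handling core for every interrupt serviced on this CPU.
 */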
#ifndef CONFIG_GENERIC_HARDIRQS
#define kstat_irqs_this_cpu(irq) \
	(this_cpu_read(kstat.irqs[irq]))

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	kstat_this_cpu.irqs[irq]++;
	kstat_this_cpu.irqs_sum++;
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#define kstat_irqs_this_cpu(DESC) \
	((DESC)->kstat_irqs[smp_processor_id()])
#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
	((DESC)->kstat_irqs[smp_processor_id()]++);\
	kstat_this_cpu.irqs_sum++; } while (0)

#endif

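/*
 * Softirq statistics: one counter per softirq vector per CPU, exported
 * through /proc/softirqs.
 */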
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	kstat_this_cpu.softirqs[irq]++;
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}

/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

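/*
 * CPU time accounting hooks, implemented by the scheduler's cputime
 * accounting code: the account_*_time() helpers charge cputime to a task
 * and to the matching cpu_usage_stat bucket, while the tick based
 * variants below are called from the timer tick paths.
 */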
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */