#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,
};

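/*
 * Per-CPU cumulative time accounting: each cpu_usage_stat bucket above
 * accumulates the time a CPU has spent in that state since boot.
 * Readers such as fs/proc/stat.c print these counters for every CPU
 * via the kcpustat_cpu() accessor defined below.
 */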
struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

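/*
 * Per-CPU interrupt statistics: a running total of all interrupts
 * (irqs_sum), per-softirq counts and, when the architecture does not
 * use CONFIG_GENERIC_HARDIRQS, a per-IRQ count array.
 */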
struct kernel_stat {
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
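
/*
 * Typical usage (a sketch, not taken verbatim from any caller):
 *
 *	u64 user = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 *
 * kstat_this_cpu/kcpustat_this_cpu address the current CPU's copy and,
 * as noted above, are only meaningful with preemption disabled.
 */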

extern unsigned long long nr_context_switches(void);

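/*
 * Per-IRQ counting comes in two flavours.  Without
 * CONFIG_GENERIC_HARDIRQS the counts live in kstat.irqs[] above; with
 * it, each irq_desc carries its own per-CPU kstat_irqs counter and the
 * genirq core provides kstat_irqs_cpu()/kstat_irqs().
 */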
#ifndef CONFIG_GENERIC_HARDIRQS

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	__this_cpu_inc(kstat.irqs[irq]);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);

#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
do {							\
	__this_cpu_inc(*(DESC)->kstat_irqs);		\
	__this_cpu_inc(kstat.irqs_sum);			\
} while (0)

#endif

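/*
 * Softirq counts are always kept in kstat.softirqs[]; /proc/softirqs
 * is built from the per-CPU values returned by kstat_softirqs_cpu().
 */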
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}

/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

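/*
 * cputime accounting hooks: the tick and scheduler code feed elapsed
 * time into the per-CPU counters above.  The *_ticks() variants
 * account several whole ticks at once (e.g. when catching up after a
 * tickless-idle period).
 */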
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */