#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include "sched.h"

/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

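/*
 * Illustrative layout of /proc/schedstat as produced by show_schedstat() and
 * show_easstat() below (a sketch only; the timestamp value is made up and one
 * cpu/eas/domain block is emitted per online cpu):
 *
 *	version 15
 *	timestamp 4294892870
 *	cpu0 <9 runqueue counters>
 *	eas <20 EAS counters for the runqueue>                    (CONFIG_SMP)
 *	domain0 <cpumask> <8 load-balance counters per idle type> <12 more counters>
 *	eas <20 EAS counters for the domain>
 *	...
 */
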
#ifdef CONFIG_SMP
static inline void show_easstat(struct seq_file *seq, struct eas_stats *stats)
{
	/* EAS-specific stats, printed for both runqueues and sched domains */
	seq_printf(seq, "eas %llu %llu %llu %llu %llu %llu ",
		   stats->sis_attempts, stats->sis_idle, stats->sis_cache_affine,
		   stats->sis_suff_cap, stats->sis_idle_cpu, stats->sis_count);

	seq_printf(seq, "%llu %llu %llu %llu %llu %llu %llu ",
		   stats->secb_attempts, stats->secb_sync, stats->secb_idle_bt,
		   stats->secb_insuff_cap, stats->secb_no_nrg_sav,
		   stats->secb_nrg_sav, stats->secb_count);

	seq_printf(seq, "%llu %llu %llu %llu %llu ",
		   stats->fbt_attempts, stats->fbt_no_cpu, stats->fbt_no_sd,
		   stats->fbt_pref_idle, stats->fbt_count);

	seq_printf(seq, "%llu %llu\n",
		   stats->cas_attempts, stats->cas_count);
}
#endif

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
			   "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
			   cpu, rq->yld_count,
			   rq->sched_count, rq->sched_goidle,
			   rq->ttwu_count, rq->ttwu_local,
			   rq->rq_cpu_time,
			   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		show_easstat(seq, &rq->eas_stats);

		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
					   sd->lb_count[itype],
					   sd->lb_balanced[itype],
					   sd->lb_failed[itype],
					   sd->lb_imbalance[itype],
					   sd->lb_gained[itype],
					   sd->lb_hot_gained[itype],
					   sd->lb_nobusyq[itype],
					   sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
				   sd->alb_count, sd->alb_failed, sd->alb_pushed,
				   sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
				   sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
				   sd->ttwu_wake_remote, sd->ttwu_move_affine,
				   sd->ttwu_move_balance);

			show_easstat(seq, &sd->eas_stats);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

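/*
 * Illustrative example, assuming cpus 0 and 2 are online and cpu 1 has been
 * hot-unplugged: a sequential read produces the cookies
 *
 *	(void *)1          header ("version" and "timestamp" lines)
 *	(void *)(0 + 2)    cpu 0
 *	(void *)(2 + 2)    cpu 2 (the offline cpu 1 is skipped)
 *	NULL               end of the sequence
 *
 * i.e. the cookie handed to show_schedstat() is always cpu + 2, which is why
 * it recovers the cpu number with cpu = (unsigned long)(v - 2).
 */
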
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int schedstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &schedstat_sops);
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
subsys_initcall(proc_schedstat_init);
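
/*
 * The file is typically consumed from user space with e.g. "cat
 * /proc/schedstat"; seq_read() then drives the schedstat_start() /
 * schedstat_next() / show_schedstat() iterator above.
 */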