/**
 * @file nmi_timer_int.c
 *
 * @remark Copyright 2011 Advanced Micro Devices, Inc.
 *
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>

#ifdef CONFIG_OPROFILE_NMI_TIMER

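/*
 * One perf event per CPU drives the NMI timer.  ctr_running records
 * whether a profiling run is active, so the CPU hotplug callbacks
 * know whether an event on a newly onlined CPU must be enabled.
 */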
static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
static int ctr_running;

static struct perf_event_attr nmi_timer_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
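/*
 * The "timer" is a hardware cycle counter programmed (in
 * nmi_timer_setup()) to overflow once per timer tick.  It is pinned
 * so it stays scheduled on the PMU, and created disabled so that
 * perf_event_enable()/perf_event_disable() can gate sampling from
 * nmi_timer_start()/nmi_timer_stop().
 */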

static void nmi_timer_callback(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
{
        event->hw.interrupts = 0;       /* don't throttle interrupts */
        oprofile_add_sample(regs, 0);
}
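/*
 * The overflow handler runs from the PMU interrupt, which is an NMI
 * on x86.  That is what lets samples be taken even in code that runs
 * with normal interrupts disabled.  Clearing hw.interrupts on each
 * overflow keeps perf's interrupt throttling from kicking in and
 * suppressing subsequent samples.
 */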

static int nmi_timer_start_cpu(int cpu)
{
        struct perf_event *event = per_cpu(nmi_timer_events, cpu);

        if (!event) {
                event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
                                                         nmi_timer_callback, NULL);
                if (IS_ERR(event))
                        return PTR_ERR(event);
                per_cpu(nmi_timer_events, cpu) = event;
        }

        if (event && ctr_running)
                perf_event_enable(event);

        return 0;
}
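/*
 * Events are created lazily, on first use per CPU, and cached in
 * nmi_timer_events, so later start/stop cycles reuse the counter
 * instead of recreating it.
 */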

static void nmi_timer_stop_cpu(int cpu)
{
        struct perf_event *event = per_cpu(nmi_timer_events, cpu);

        if (event && ctr_running)
                perf_event_disable(event);
}

static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
                                  void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                nmi_timer_start_cpu(cpu);
                break;
        case CPU_DOWN_PREPARE:
                nmi_timer_stop_cpu(cpu);
                break;
        }
        return NOTIFY_DONE;
}
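/*
 * CPU_ONLINE (and CPU_DOWN_FAILED, i.e. the CPU stays up after all)
 * (re)arms the event on that CPU; CPU_DOWN_PREPARE quiesces it before
 * the CPU goes away.  The events themselves survive offline/online
 * transitions and are only released in nmi_timer_shutdown().
 */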

static struct notifier_block nmi_timer_cpu_nb = {
        .notifier_call = nmi_timer_cpu_notifier
};

static int nmi_timer_start(void)
{
        int cpu;

        get_online_cpus();
        ctr_running = 1;
        for_each_online_cpu(cpu)
                nmi_timer_start_cpu(cpu);
        put_online_cpus();

        return 0;
}

static void nmi_timer_stop(void)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                nmi_timer_stop_cpu(cpu);
        ctr_running = 0;
        put_online_cpus();
}

static void nmi_timer_shutdown(void)
{
        struct perf_event *event;
        int cpu;

        get_online_cpus();
        unregister_cpu_notifier(&nmi_timer_cpu_nb);
        for_each_possible_cpu(cpu) {
                event = per_cpu(nmi_timer_events, cpu);
                if (!event)
                        continue;
                perf_event_disable(event);
                per_cpu(nmi_timer_events, cpu) = NULL;
                perf_event_release_kernel(event);
        }

        put_online_cpus();
}
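/*
 * Note the walk over all *possible* CPUs above, not just online ones:
 * an event may still be cached for a CPU that was onlined earlier and
 * has since been taken down again.
 */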

static int nmi_timer_setup(void)
{
        int cpu, err;
        u64 period;

        /* clock cycles per tick: */
        period = (u64)cpu_khz * 1000;
        do_div(period, HZ);
        nmi_timer_attr.sample_period = period;
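        /*
         * Illustrative numbers: with cpu_khz == 2000000 (a 2 GHz CPU)
         * and HZ == 1000, sample_period becomes 2,000,000 cycles,
         * i.e. one NMI sample per 1 ms tick.
         */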

        get_online_cpus();
        err = register_cpu_notifier(&nmi_timer_cpu_nb);
        if (err)
                goto out;
        /* can't attach events to offline cpus: */
        for_each_online_cpu(cpu) {
                err = nmi_timer_start_cpu(cpu);
                if (err)
                        break;
        }
        if (err)
                nmi_timer_shutdown();
out:
        put_online_cpus();
        return err;
}

int __init op_nmi_timer_init(struct oprofile_operations *ops)
{
        int err = 0;

        err = nmi_timer_setup();
        if (err)
                return err;
        nmi_timer_shutdown();           /* only check, don't alloc */
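        /*
         * The setup/shutdown pair above is only a probe: it verifies
         * that a perf cycle counter can actually be created on this
         * system before the callbacks are handed to the oprofile
         * core, which calls ops->setup again when profiling starts.
         */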

        ops->create_files = NULL;
        ops->setup = nmi_timer_setup;
        ops->shutdown = nmi_timer_shutdown;
        ops->start = nmi_timer_start;
        ops->stop = nmi_timer_stop;
        ops->cpu_type = "timer";

        printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");

        return 0;
}

#endif